]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/target.c
convert to_insert_hw_breakpoint
[thirdparty/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (struct target_ops *, const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
59
60 static void tcomplain (void) ATTRIBUTE_NORETURN;
61
62 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
63
64 static int return_zero (void);
65
66 static int return_one (void);
67
68 static int return_minus_one (void);
69
70 static void *return_null (void);
71
72 void target_ignore (void);
73
74 static void target_command (char *, int);
75
76 static struct target_ops *find_default_run_target (char *);
77
78 static target_xfer_partial_ftype default_xfer_partial;
79
80 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
81 ptid_t ptid);
82
83 static int find_default_can_async_p (struct target_ops *ignore);
84
85 static int find_default_is_async_p (struct target_ops *ignore);
86
87 #include "target-delegates.c"
88
89 static void init_dummy_target (void);
90
91 static struct target_ops debug_target;
92
93 static void debug_to_open (char *, int);
94
95 static void debug_to_prepare_to_store (struct target_ops *self,
96 struct regcache *);
97
98 static void debug_to_files_info (struct target_ops *);
99
100 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
101 struct bp_target_info *);
102
103 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
104 struct bp_target_info *);
105
106 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
107 int, int, int);
108
109 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
110 struct gdbarch *,
111 struct bp_target_info *);
112
113 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
114 struct gdbarch *,
115 struct bp_target_info *);
116
117 static int debug_to_insert_watchpoint (struct target_ops *self,
118 CORE_ADDR, int, int,
119 struct expression *);
120
121 static int debug_to_remove_watchpoint (struct target_ops *self,
122 CORE_ADDR, int, int,
123 struct expression *);
124
125 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
126
127 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
128 CORE_ADDR, CORE_ADDR, int);
129
130 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
131 CORE_ADDR, int);
132
133 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
134 CORE_ADDR, int, int,
135 struct expression *);
136
137 static void debug_to_terminal_init (struct target_ops *self);
138
139 static void debug_to_terminal_inferior (struct target_ops *self);
140
141 static void debug_to_terminal_ours_for_output (struct target_ops *self);
142
143 static void debug_to_terminal_save_ours (struct target_ops *self);
144
145 static void debug_to_terminal_ours (struct target_ops *self);
146
147 static void debug_to_load (struct target_ops *self, char *, int);
148
149 static int debug_to_can_run (struct target_ops *self);
150
151 static void debug_to_stop (struct target_ops *self, ptid_t);
152
153 /* Pointer to array of target architecture structures; the size of the
154 array; the current index into the array; the allocated size of the
155 array. */
156 struct target_ops **target_structs;
157 unsigned target_struct_size;
158 unsigned target_struct_allocsize;
159 #define DEFAULT_ALLOCSIZE 10
160
161 /* The initial current target, so that there is always a semi-valid
162 current target. */
163
164 static struct target_ops dummy_target;
165
166 /* Top of target stack. */
167
168 static struct target_ops *target_stack;
169
170 /* The target structure we are currently using to talk to a process
171 or file or whatever "inferior" we have. */
172
173 struct target_ops current_target;
174
175 /* Command list for target. */
176
177 static struct cmd_list_element *targetlist = NULL;
178
179 /* Nonzero if we should trust readonly sections from the
180 executable when reading memory. */
181
182 static int trust_readonly = 0;
183
184 /* Nonzero if we should show true memory content including
185 memory breakpoint inserted by gdb. */
186
187 static int show_memory_breakpoints = 0;
188
189 /* These globals control whether GDB attempts to perform these
190 operations; they are useful for targets that need to prevent
191 inadvertant disruption, such as in non-stop mode. */
192
193 int may_write_registers = 1;
194
195 int may_write_memory = 1;
196
197 int may_insert_breakpoints = 1;
198
199 int may_insert_tracepoints = 1;
200
201 int may_insert_fast_tracepoints = 1;
202
203 int may_stop = 1;
204
205 /* Non-zero if we want to see trace of target level stuff. */
206
207 static unsigned int targetdebug = 0;
/* "show debug target" callback: print the current setting.  */

static void
show_targetdebug (struct ui_file *stream, int from_tty,
		  struct cmd_list_element *cmd, const char *setting)
{
  fprintf_filtered (stream, _("Target debugging is %s.\n"), setting);
}
214
215 static void setup_target_debug (void);
216
217 /* The user just typed 'target' without the name of a target. */
218
219 static void
220 target_command (char *arg, int from_tty)
221 {
222 fputs_filtered ("Argument required (target name). Try `help target'\n",
223 gdb_stdout);
224 }
225
226 /* Default target_has_* methods for process_stratum targets. */
227
228 int
229 default_child_has_all_memory (struct target_ops *ops)
230 {
231 /* If no inferior selected, then we can't read memory here. */
232 if (ptid_equal (inferior_ptid, null_ptid))
233 return 0;
234
235 return 1;
236 }
237
238 int
239 default_child_has_memory (struct target_ops *ops)
240 {
241 /* If no inferior selected, then we can't read memory here. */
242 if (ptid_equal (inferior_ptid, null_ptid))
243 return 0;
244
245 return 1;
246 }
247
248 int
249 default_child_has_stack (struct target_ops *ops)
250 {
251 /* If no inferior selected, there's no stack. */
252 if (ptid_equal (inferior_ptid, null_ptid))
253 return 0;
254
255 return 1;
256 }
257
258 int
259 default_child_has_registers (struct target_ops *ops)
260 {
261 /* Can't read registers from no inferior. */
262 if (ptid_equal (inferior_ptid, null_ptid))
263 return 0;
264
265 return 1;
266 }
267
268 int
269 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
270 {
271 /* If there's no thread selected, then we can't make it run through
272 hoops. */
273 if (ptid_equal (the_ptid, null_ptid))
274 return 0;
275
276 return 1;
277 }
278
279
280 int
281 target_has_all_memory_1 (void)
282 {
283 struct target_ops *t;
284
285 for (t = current_target.beneath; t != NULL; t = t->beneath)
286 if (t->to_has_all_memory (t))
287 return 1;
288
289 return 0;
290 }
291
292 int
293 target_has_memory_1 (void)
294 {
295 struct target_ops *t;
296
297 for (t = current_target.beneath; t != NULL; t = t->beneath)
298 if (t->to_has_memory (t))
299 return 1;
300
301 return 0;
302 }
303
304 int
305 target_has_stack_1 (void)
306 {
307 struct target_ops *t;
308
309 for (t = current_target.beneath; t != NULL; t = t->beneath)
310 if (t->to_has_stack (t))
311 return 1;
312
313 return 0;
314 }
315
316 int
317 target_has_registers_1 (void)
318 {
319 struct target_ops *t;
320
321 for (t = current_target.beneath; t != NULL; t = t->beneath)
322 if (t->to_has_registers (t))
323 return 1;
324
325 return 0;
326 }
327
328 int
329 target_has_execution_1 (ptid_t the_ptid)
330 {
331 struct target_ops *t;
332
333 for (t = current_target.beneath; t != NULL; t = t->beneath)
334 if (t->to_has_execution (t, the_ptid))
335 return 1;
336
337 return 0;
338 }
339
340 int
341 target_has_execution_current (void)
342 {
343 return target_has_execution_1 (inferior_ptid);
344 }
345
346 /* Complete initialization of T. This ensures that various fields in
347 T are set, if needed by the target implementation. */
348
349 void
350 complete_target_initialization (struct target_ops *t)
351 {
352 /* Provide default values for all "must have" methods. */
353 if (t->to_xfer_partial == NULL)
354 t->to_xfer_partial = default_xfer_partial;
355
356 if (t->to_has_all_memory == NULL)
357 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
358
359 if (t->to_has_memory == NULL)
360 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
361
362 if (t->to_has_stack == NULL)
363 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
364
365 if (t->to_has_registers == NULL)
366 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
367
368 if (t->to_has_execution == NULL)
369 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
370
371 install_delegators (t);
372 }
373
374 /* Add possible target architecture T to the list and add a new
375 command 'target T->to_shortname'. Set COMPLETER as the command's
376 completer if not NULL. */
377
378 void
379 add_target_with_completer (struct target_ops *t,
380 completer_ftype *completer)
381 {
382 struct cmd_list_element *c;
383
384 complete_target_initialization (t);
385
386 if (!target_structs)
387 {
388 target_struct_allocsize = DEFAULT_ALLOCSIZE;
389 target_structs = (struct target_ops **) xmalloc
390 (target_struct_allocsize * sizeof (*target_structs));
391 }
392 if (target_struct_size >= target_struct_allocsize)
393 {
394 target_struct_allocsize *= 2;
395 target_structs = (struct target_ops **)
396 xrealloc ((char *) target_structs,
397 target_struct_allocsize * sizeof (*target_structs));
398 }
399 target_structs[target_struct_size++] = t;
400
401 if (targetlist == NULL)
402 add_prefix_cmd ("target", class_run, target_command, _("\
403 Connect to a target machine or process.\n\
404 The first argument is the type or protocol of the target machine.\n\
405 Remaining arguments are interpreted by the target protocol. For more\n\
406 information on the arguments for a particular protocol, type\n\
407 `help target ' followed by the protocol name."),
408 &targetlist, "target ", 0, &cmdlist);
409 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
410 &targetlist);
411 if (completer != NULL)
412 set_cmd_completer (c, completer);
413 }
414
415 /* Add a possible target architecture to the list. */
416
417 void
418 add_target (struct target_ops *t)
419 {
420 add_target_with_completer (t, NULL);
421 }
422
423 /* See target.h. */
424
/* See target.h.  Register ALIAS as a deprecated alias for target T's
   open command.  */

void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  So register a plain command and deprecate it by
     hand instead.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  alt = xstrprintf ("target %s", t->to_shortname);
  /* NOTE(review): ALT is heap-allocated and never freed here; presumably
     deprecate_cmd keeps the pointer for the command's lifetime — confirm
     before "fixing" this as a leak.  */
  deprecate_cmd (c, alt);
}
437
438 /* Stub functions */
439
/* Intentionally empty: the default no-op implementation installed in
   target vector slots that may safely do nothing.  */

void
target_ignore (void)
{
}
444
445 void
446 target_kill (void)
447 {
448 struct target_ops *t;
449
450 for (t = current_target.beneath; t != NULL; t = t->beneath)
451 if (t->to_kill != NULL)
452 {
453 if (targetdebug)
454 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
455
456 t->to_kill (t);
457 return;
458 }
459
460 noprocess ();
461 }
462
463 void
464 target_load (char *arg, int from_tty)
465 {
466 target_dcache_invalidate ();
467 (*current_target.to_load) (&current_target, arg, from_tty);
468 }
469
470 void
471 target_create_inferior (char *exec_file, char *args,
472 char **env, int from_tty)
473 {
474 struct target_ops *t;
475
476 for (t = current_target.beneath; t != NULL; t = t->beneath)
477 {
478 if (t->to_create_inferior != NULL)
479 {
480 t->to_create_inferior (t, exec_file, args, env, from_tty);
481 if (targetdebug)
482 fprintf_unfiltered (gdb_stdlog,
483 "target_create_inferior (%s, %s, xxx, %d)\n",
484 exec_file, args, from_tty);
485 return;
486 }
487 }
488
489 internal_error (__FILE__, __LINE__,
490 _("could not find a target to create inferior"));
491 }
492
493 void
494 target_terminal_inferior (void)
495 {
496 /* A background resume (``run&'') should leave GDB in control of the
497 terminal. Use target_can_async_p, not target_is_async_p, since at
498 this point the target is not async yet. However, if sync_execution
499 is not set, we know it will become async prior to resume. */
500 if (target_can_async_p () && !sync_execution)
501 return;
502
503 /* If GDB is resuming the inferior in the foreground, install
504 inferior's terminal modes. */
505 (*current_target.to_terminal_inferior) (&current_target);
506 }
507
508 static int
509 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
510 struct target_ops *t)
511 {
512 errno = EIO; /* Can't read/write this location. */
513 return 0; /* No bytes handled. */
514 }
515
516 static void
517 tcomplain (void)
518 {
519 error (_("You can't do that when your target is `%s'"),
520 current_target.to_shortname);
521 }
522
/* Complain that the operation needs a live process.  Does not
   return.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
528
/* to_terminal_info default: nothing was ever saved.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
534
535 /* A default implementation for the to_get_ada_task_ptid target method.
536
537 This function builds the PTID by using both LWP and TID as part of
538 the PTID lwp and tid elements. The pid used is the pid of the
539 inferior_ptid. */
540
541 static ptid_t
542 default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
543 {
544 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
545 }
546
547 static enum exec_direction_kind
548 default_execution_direction (struct target_ops *self)
549 {
550 if (!target_can_execute_reverse)
551 return EXEC_FORWARD;
552 else if (!target_can_async_p ())
553 return EXEC_FORWARD;
554 else
555 gdb_assert_not_reached ("\
556 to_execution_direction must be implemented for reverse async");
557 }
558
559 /* Go through the target stack from top to bottom, copying over zero
560 entries in current_target, then filling in still empty entries. In
561 effect, we are doing class inheritance through the pushed target
562 vectors.
563
564 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
565 is currently implemented, is that it discards any knowledge of
566 which target an inherited method originally belonged to.
   Consequently, new target methods should instead explicitly and
568 locally search the target stack for the target that can handle the
569 request. */
570
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* Copy FIELD from TARGET into current_target, but only if no target
     higher on the stack already supplied it.  */
#define INHERIT(FIELD, TARGET) \
  if (!current_target.FIELD) \
    current_target.FIELD = (TARGET)->FIELD

  /* Walk the stack top to bottom; the "Do not inherit" comments mark
     slots that are either delegated or searched for explicitly.  */
  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      /* Do not inherit to_post_attach.  */
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      /* Do not inherit to_prepare_to_store.  */
      INHERIT (deprecated_xfer_memory, t);
      /* Do not inherit to_files_info.  */
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      /* Do not inherit to_can_use_hw_breakpoint.  */
      /* Do not inherit to_insert_hw_breakpoint.  */
      INHERIT (to_remove_hw_breakpoint, t);
      /* Do not inherit to_ranged_break_num_registers.  */
      INHERIT (to_insert_watchpoint, t);
      INHERIT (to_remove_watchpoint, t);
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      INHERIT (to_watchpoint_addr_within_range, t);
      INHERIT (to_region_ok_for_hw_watchpoint, t);
      INHERIT (to_can_accel_watchpoint_condition, t);
      /* Do not inherit to_masked_watch_num_registers.  */
      INHERIT (to_terminal_init, t);
      INHERIT (to_terminal_inferior, t);
      INHERIT (to_terminal_ours_for_output, t);
      INHERIT (to_terminal_ours, t);
      INHERIT (to_terminal_save_ours, t);
      INHERIT (to_terminal_info, t);
      /* Do not inherit to_kill.  */
      INHERIT (to_load, t);
      /* Do not inherit to_create_inferior.  */
      INHERIT (to_post_startup_inferior, t);
      INHERIT (to_insert_fork_catchpoint, t);
      INHERIT (to_remove_fork_catchpoint, t);
      INHERIT (to_insert_vfork_catchpoint, t);
      INHERIT (to_remove_vfork_catchpoint, t);
      /* Do not inherit to_follow_fork.  */
      INHERIT (to_insert_exec_catchpoint, t);
      INHERIT (to_remove_exec_catchpoint, t);
      INHERIT (to_set_syscall_catchpoint, t);
      INHERIT (to_has_exited, t);
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      INHERIT (to_extra_thread_info, t);
      INHERIT (to_thread_name, t);
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      /* Do not inherit to_rcmd.  */
      INHERIT (to_pid_to_exec_file, t);
      INHERIT (to_log_command, t);
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      INHERIT (to_find_memory_regions, t);
      INHERIT (to_make_corefile_notes, t);
      INHERIT (to_get_bookmark, t);
      INHERIT (to_goto_bookmark, t);
      /* Do not inherit to_get_thread_local_address.  */
      INHERIT (to_can_execute_reverse, t);
      INHERIT (to_execution_direction, t);
      INHERIT (to_thread_architecture, t);
      /* Do not inherit to_read_description.  */
      INHERIT (to_get_ada_task_ptid, t);
      /* Do not inherit to_search_memory.  */
      INHERIT (to_supports_multi_process, t);
      INHERIT (to_supports_enable_disable_tracepoint, t);
      INHERIT (to_supports_string_tracing, t);
      INHERIT (to_trace_init, t);
      INHERIT (to_download_tracepoint, t);
      INHERIT (to_can_download_tracepoint, t);
      INHERIT (to_download_trace_state_variable, t);
      INHERIT (to_enable_tracepoint, t);
      INHERIT (to_disable_tracepoint, t);
      INHERIT (to_trace_set_readonly_regions, t);
      INHERIT (to_trace_start, t);
      INHERIT (to_get_trace_status, t);
      INHERIT (to_get_tracepoint_status, t);
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_get_min_fast_tracepoint_insn_len, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_set_trace_buffer_size, t);
      INHERIT (to_set_trace_notes, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_use_agent, t);
      INHERIT (to_can_use_agent, t);
      INHERIT (to_augmented_libraries_svr4_read, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      INHERIT (to_can_run_breakpoint_commands, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that prints an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
  if (!current_target.field)               \
    current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_remove_hw_breakpoint,
	    (int (*) (struct target_ops *, struct gdbarch *,
		      struct bp_target_info *))
	    return_minus_one);
  de_fault (to_insert_watchpoint,
	    (int (*) (struct target_ops *, CORE_ADDR, int, int,
		      struct expression *))
	    return_minus_one);
  de_fault (to_remove_watchpoint,
	    (int (*) (struct target_ops *, CORE_ADDR, int, int,
		      struct expression *))
	    return_minus_one);
  de_fault (to_watchpoint_addr_within_range,
	    default_watchpoint_addr_within_range);
  de_fault (to_region_ok_for_hw_watchpoint,
	    default_region_ok_for_hw_watchpoint);
  de_fault (to_can_accel_watchpoint_condition,
	    (int (*) (struct target_ops *, CORE_ADDR, int, int,
		      struct expression *))
	    return_zero);
  de_fault (to_terminal_init,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_inferior,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_ours_for_output,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_ours,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_save_ours,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_info,
	    default_terminal_info);
  de_fault (to_load,
	    (void (*) (struct target_ops *, char *, int))
	    tcomplain);
  de_fault (to_post_startup_inferior,
	    (void (*) (struct target_ops *, ptid_t))
	    target_ignore);
  de_fault (to_insert_fork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_remove_fork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_insert_vfork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_remove_vfork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_insert_exec_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_remove_exec_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_set_syscall_catchpoint,
	    (int (*) (struct target_ops *, int, int, int, int, int *))
	    return_one);
  de_fault (to_has_exited,
	    (int (*) (struct target_ops *, int, int, int *))
	    return_zero);
  de_fault (to_can_run,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_extra_thread_info,
	    (char *(*) (struct target_ops *, struct thread_info *))
	    return_null);
  de_fault (to_thread_name,
	    (char *(*) (struct target_ops *, struct thread_info *))
	    return_null);
  de_fault (to_stop,
	    (void (*) (struct target_ops *, ptid_t))
	    target_ignore);
  de_fault (to_pid_to_exec_file,
	    (char *(*) (struct target_ops *, int))
	    return_null);
  de_fault (to_thread_architecture,
	    default_thread_architecture);
  /* to_read_description is explicitly reset rather than defaulted; its
     absence is meaningful to the callers that search for it.  */
  current_target.to_read_description = NULL;
  de_fault (to_get_ada_task_ptid,
	    (ptid_t (*) (struct target_ops *, long, long))
	    default_get_ada_task_ptid);
  de_fault (to_supports_multi_process,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_supports_enable_disable_tracepoint,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_supports_string_tracing,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_trace_init,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_download_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_can_download_tracepoint,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_download_trace_state_variable,
	    (void (*) (struct target_ops *, struct trace_state_variable *))
	    tcomplain);
  de_fault (to_enable_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_disable_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_trace_set_readonly_regions,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_trace_start,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_get_trace_status,
	    (int (*) (struct target_ops *, struct trace_status *))
	    return_minus_one);
  de_fault (to_get_tracepoint_status,
	    (void (*) (struct target_ops *, struct breakpoint *,
		       struct uploaded_tp *))
	    tcomplain);
  de_fault (to_trace_stop,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_trace_find,
	    (int (*) (struct target_ops *,
		      enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (struct target_ops *, int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (struct target_ops *, const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct target_ops *, struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct target_ops *, struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (struct target_ops *, gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_get_min_fast_tracepoint_insn_len,
	    (int (*) (struct target_ops *))
	    return_minus_one);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_trace_buffer_size,
	    (void (*) (struct target_ops *, LONGEST))
	    target_ignore);
  de_fault (to_set_trace_notes,
	    (int (*) (struct target_ops *,
		      const char *, const char *, const char *))
	    return_zero);
  de_fault (to_get_tib_address,
	    (int (*) (struct target_ops *, ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (struct target_ops *,
		      CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (struct target_ops *,
						    const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (struct target_ops *))
	    return_null);
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_can_run_breakpoint_commands,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_use_agent,
	    (int (*) (struct target_ops *, int))
	    tcomplain);
  de_fault (to_can_use_agent,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_augmented_libraries_svr4_read,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_execution_direction, default_execution_direction);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
947
948 /* Push a new target type into the stack of the existing target accessors,
949 possibly superseding some of the existing accessors.
950
951 Rather than allow an empty stack, we always have the dummy target at
952 the bottom stratum, so we can call the function vectors without
953 checking them. */
954
void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  CUR ends up
     pointing at the link (either &target_stack or some ->beneath)
     where T must be spliced in.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
	 and un-hook it from the stack.  Unlink before closing so the
	 close callback never sees a half-linked stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  /* Re-squash the stack into current_target.  */
  update_current_target ();
}
998
999 /* Remove a target_ops vector from the stack, wherever it may be.
1000 Return how many times it was removed (0 or 1). */
1001
int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target always stays at the bottom of the stack.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}
1040
/* Unpush (and close) every target whose stratum is strictly greater
   than ABOVE_STRATUM.  Raises an internal error if a target on the
   stack cannot be found by unpush_target, which would indicate stack
   corruption.  */

void
pop_all_targets_above (enum strata above_stratum)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      if (!unpush_target (target_stack))
        {
          fprintf_unfiltered (gdb_stderr,
                              "pop_all_targets couldn't find target %s\n",
                              target_stack->to_shortname);
          internal_error (__FILE__, __LINE__,
                          _("failed internal consistency check"));
          break;
        }
    }
}
1057
/* Unpush every target except the dummy target, which always remains
   at the bottom of the stack.  */

void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
1063
1064 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1065
1066 int
1067 target_is_pushed (struct target_ops *t)
1068 {
1069 struct target_ops **cur;
1070
1071 /* Check magic number. If wrong, it probably means someone changed
1072 the struct definition, but not all the places that initialize one. */
1073 if (t->to_magic != OPS_MAGIC)
1074 {
1075 fprintf_unfiltered (gdb_stderr,
1076 "Magic number of %s target struct wrong\n",
1077 t->to_shortname);
1078 internal_error (__FILE__, __LINE__,
1079 _("failed internal consistency check"));
1080 }
1081
1082 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1083 if (*cur == t)
1084 return 1;
1085
1086 return 0;
1087 }
1088
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.

   Requires both a target that implements to_get_thread_local_address
   and a gdbarch that can fetch the TLS load module address; errors
   out otherwise.  TLS-specific errors raised while resolving the
   address are translated here into user-readable messages; any other
   exception is re-thrown.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* Volatile so the value survives TRY_CATCH's setjmp/longjmp.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target beneath us that knows how to resolve
     thread-local addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
        break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
        {
          CORE_ADDR lm_addr;

          /* Fetch the load module address for this objfile.  */
          lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
                                                           objfile);
          /* If it's 0, throw the appropriate exception.  */
          if (lm_addr == 0)
            throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
                         _("TLS load module not found"));

          addr = target->to_get_thread_local_address (target, ptid,
                                                      lm_addr, offset);
        }
      /* If an error occurred, print TLS related messages here.  Otherwise,
         throw the error to some higher catcher.  */
      if (ex.reason < 0)
        {
          int objfile_is_library = (objfile->flags & OBJF_SHARED);

          switch (ex.error)
            {
            case TLS_NO_LIBRARY_SUPPORT_ERROR:
              error (_("Cannot find thread-local variables "
                       "in this thread library."));
              break;
            case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
              if (objfile_is_library)
                error (_("Cannot find shared library `%s' in dynamic"
                         " linker's load module list"), objfile_name (objfile));
              else
                error (_("Cannot find executable file `%s' in dynamic"
                         " linker's load module list"), objfile_name (objfile));
              break;
            case TLS_NOT_ALLOCATED_YET_ERROR:
              if (objfile_is_library)
                error (_("The inferior has not yet allocated storage for"
                         " thread-local variables in\n"
                         "the shared library `%s'\n"
                         "for %s"),
                       objfile_name (objfile), target_pid_to_str (ptid));
              else
                error (_("The inferior has not yet allocated storage for"
                         " thread-local variables in\n"
                         "the executable `%s'\n"
                         "for %s"),
                       objfile_name (objfile), target_pid_to_str (ptid));
              break;
            case TLS_GENERIC_ERROR:
              if (objfile_is_library)
                error (_("Cannot find thread-local storage for %s, "
                         "shared library %s:\n%s"),
                       target_pid_to_str (ptid),
                       objfile_name (objfile), ex.message);
              else
                error (_("Cannot find thread-local storage for %s, "
                         "executable file %s:\n%s"),
                       target_pid_to_str (ptid),
                       objfile_name (objfile), ex.message);
              break;
            default:
              throw_exception (ex);
              break;
            }
        }
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1185
1186 const char *
1187 target_xfer_status_to_string (enum target_xfer_status err)
1188 {
1189 #define CASE(X) case X: return #X
1190 switch (err)
1191 {
1192 CASE(TARGET_XFER_E_IO);
1193 CASE(TARGET_XFER_E_UNAVAILABLE);
1194 default:
1195 return "<unknown>";
1196 }
1197 #undef CASE
1198 };
1199
1200
1201 #undef MIN
1202 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1203
/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  Note: the terminating NUL (when found) is included in the
   returned buffer and in the count.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];            /* One aligned 4-byte chunk at a time.  */
  int errcode = 0;
  char *buffer;               /* Accumulated result, grown by doubling.  */
  int buffer_allocated;
  char *bufptr;               /* Next free slot in BUFFER.  */
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read a 4-byte aligned chunk covering MEMADDR; TLEN is how many
         of its bytes (starting at OFFSET) actually belong to us.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
        {
          /* The transfer request might have crossed the boundary to an
             unallocated region of memory.  Retry the transfer, requesting
             a single byte.  */
          tlen = 1;
          offset = 0;
          errcode = target_read_memory (memaddr, buf, 1);
          if (errcode != 0)
            goto done;
        }

      if (bufptr - buffer + tlen > buffer_allocated)
        {
          unsigned int bytes;

          /* Grow the buffer geometrically, preserving the write
             position across the realloc.  */
          bytes = bufptr - buffer;
          buffer_allocated *= 2;
          buffer = xrealloc (buffer, buffer_allocated);
          bufptr = buffer + bytes;
        }

      for (i = 0; i < tlen; i++)
        {
          *bufptr++ = buf[i + offset];
          if (buf[i + offset] == '\000')
            {
              /* Found the terminator; count it and stop.  */
              nbytes_read += i + 1;
              goto done;
            }
        }

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
done:
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1276
1277 struct target_section_table *
1278 target_get_section_table (struct target_ops *target)
1279 {
1280 struct target_ops *t;
1281
1282 if (targetdebug)
1283 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1284
1285 for (t = target; t != NULL; t = t->beneath)
1286 if (t->to_get_section_table != NULL)
1287 return (*t->to_get_section_table) (t);
1288
1289 return NULL;
1290 }
1291
1292 /* Find a section containing ADDR. */
1293
1294 struct target_section *
1295 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1296 {
1297 struct target_section_table *table = target_get_section_table (target);
1298 struct target_section *secp;
1299
1300 if (table == NULL)
1301 return NULL;
1302
1303 for (secp = table->sections; secp < table->sections_end; secp++)
1304 {
1305 if (addr >= secp->addr && addr < secp->endaddr)
1306 return secp;
1307 }
1308 return NULL;
1309 }
1310
/* Read memory from the live target, even if currently inspecting a
   traceframe.  The return is the same as that of target_read.  */

static enum target_xfer_status
target_read_live_memory (enum target_object object,
                         ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
                         ULONGEST *xfered_len)
{
  enum target_xfer_status ret;
  struct cleanup *cleanup;

  /* Switch momentarily out of tfind mode so to access live memory.
     Note that this must not clear global state, such as the frame
     cache, which must still remain valid for the previous traceframe.
     We may be _building_ the frame cache at this point.  */
  cleanup = make_cleanup_restore_traceframe_number ();
  set_traceframe_number (-1);

  /* Start beneath current_target: the flattened vector doesn't carry
     the to_has_(all_)memory checks the real stack does.  */
  ret = target_xfer_partial (current_target.beneath, object, NULL,
                             myaddr, NULL, memaddr, len, xfered_len);

  /* Restore the previously selected traceframe.  */
  do_cleanups (cleanup);
  return ret;
}
1335
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  Returns TARGET_XFER_EOF when MEMADDR does not
   fall in a known read-only section.  */

static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
                                   enum target_object object,
                                   gdb_byte *readbuf, ULONGEST memaddr,
                                   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed if MEMADDR lies in a SEC_READONLY section.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
                                 secp->the_bfd_section)
          & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      for (p = table->sections; p < table->sections_end; p++)
        {
          if (memaddr >= p->addr)
            {
              if (memend <= p->endaddr)
                {
                  /* Entire transfer is within this section.  */
                  return target_read_live_memory (object, memaddr,
                                                  readbuf, len, xfered_len);
                }
              else if (memaddr >= p->endaddr)
                {
                  /* This section ends before the transfer starts.  */
                  continue;
                }
              else
                {
                  /* This section overlaps the transfer.  Just do half.
                     The caller is expected to retry for the rest.  */
                  len = p->endaddr - memaddr;
                  return target_read_live_memory (object, memaddr,
                                                  readbuf, len, xfered_len);
                }
            }
        }
    }

  return TARGET_XFER_EOF;
}
1391
1392 /* Read memory from more than one valid target. A core file, for
1393 instance, could have some of memory but delegate other bits to
1394 the target below it. So, we must manually try all targets. */
1395
1396 static enum target_xfer_status
1397 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1398 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1399 ULONGEST *xfered_len)
1400 {
1401 enum target_xfer_status res;
1402
1403 do
1404 {
1405 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1406 readbuf, writebuf, memaddr, len,
1407 xfered_len);
1408 if (res == TARGET_XFER_OK)
1409 break;
1410
1411 /* Stop if the target reports that the memory is not available. */
1412 if (res == TARGET_XFER_E_UNAVAILABLE)
1413 break;
1414
1415 /* We want to continue past core files to executables, but not
1416 past a running target's memory. */
1417 if (ops->to_has_all_memory (ops))
1418 break;
1419
1420 ops = ops->beneath;
1421 }
1422 while (ops != NULL);
1423
1424 return res;
1425 }
1426
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   Tries, in order: unmapped overlay sections; trusted read-only
   executable sections; live read-only memory when inspecting a
   traceframe; the dcache; and finally the raw target stack.  Memory
   region attributes gate the access mode throughout.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
                       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
                       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
        {
          struct target_section_table *table
            = target_get_section_table (ops);
          const char *section_name = section->the_bfd_section->name;

          memaddr = overlay_mapped_address (memaddr, section);
          return section_table_xfer_memory_partial (readbuf, writebuf,
                                                    memaddr, len, xfered_len,
                                                    table->sections,
                                                    table->sections_end,
                                                    section_name);
        }
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
          && (bfd_get_section_flags (secp->the_bfd_section->owner,
                                     secp->the_bfd_section)
              & SEC_READONLY))
        {
          table = target_get_section_table (ops);
          return section_table_xfer_memory_partial (readbuf, writebuf,
                                                    memaddr, len, xfered_len,
                                                    table->sections,
                                                    table->sections_end,
                                                    NULL);
        }
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
         target does not support querying traceframe info, and so we
         attempt reading from the traceframe anyway (assuming the
         target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
        {
          struct cleanup *old_chain;

          old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

          if (VEC_empty (mem_range_s, available)
              || VEC_index (mem_range_s, available, 0)->start != memaddr)
            {
              /* Don't read into the traceframe's available
                 memory.  */
              if (!VEC_empty (mem_range_s, available))
                {
                  LONGEST oldlen = len;

                  /* Clamp LEN to stop just before the first available
                     traceframe range.  */
                  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
                  gdb_assert (len <= oldlen);
                }

              do_cleanups (old_chain);

              /* This goes through the topmost target again.  */
              res = memory_xfer_live_readonly_partial (ops, object,
                                                       readbuf, memaddr,
                                                       len, xfered_len);
              if (res == TARGET_XFER_OK)
                return TARGET_XFER_OK;
              else
                {
                  /* No use trying further, we know some memory starting
                     at MEMADDR isn't available.  */
                  *xfered_len = len;
                  return TARGET_XFER_E_UNAVAILABLE;
                }
            }

          /* Don't try to read more than how much is available, in
             case the target implements the deprecated QTro packet to
             cater for older GDBs (the target's knowledge of read-only
             sections may be outdated by now).  */
          len = VEC_index (mem_range_s, available, 0)->length;

          do_cleanups (old_chain);
        }
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
        return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
        return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
        error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
         with reading from a trace buffer, because reading outside of
         the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
          || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
          || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
        l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
        /* FIXME drow/2006-08-09: If we're going to preserve const
           correctness dcache_xfer_memory should take readbuf and
           writebuf.  */
        l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
                                reg_len, 1);
      if (l <= 0)
        return TARGET_XFER_E_IO;
      else
        {
          *xfered_len = (ULONGEST) l;
          return TARGET_XFER_OK;
        }
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
                                 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
          || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1637
1638 /* Perform a partial memory transfer. For docs see target.h,
1639 to_xfer_partial. */
1640
1641 static enum target_xfer_status
1642 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1643 gdb_byte *readbuf, const gdb_byte *writebuf,
1644 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1645 {
1646 enum target_xfer_status res;
1647
1648 /* Zero length requests are ok and require no work. */
1649 if (len == 0)
1650 return TARGET_XFER_EOF;
1651
1652 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1653 breakpoint insns, thus hiding out from higher layers whether
1654 there are software breakpoints inserted in the code stream. */
1655 if (readbuf != NULL)
1656 {
1657 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1658 xfered_len);
1659
1660 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1661 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1662 }
1663 else
1664 {
1665 void *buf;
1666 struct cleanup *old_chain;
1667
1668 /* A large write request is likely to be partially satisfied
1669 by memory_xfer_partial_1. We will continually malloc
1670 and free a copy of the entire write request for breakpoint
1671 shadow handling even though we only end up writing a small
1672 subset of it. Cap writes to 4KB to mitigate this. */
1673 len = min (4096, len);
1674
1675 buf = xmalloc (len);
1676 old_chain = make_cleanup (xfree, buf);
1677 memcpy (buf, writebuf, len);
1678
1679 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1680 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1681 xfered_len);
1682
1683 do_cleanups (old_chain);
1684 }
1685
1686 return res;
1687 }
1688
1689 static void
1690 restore_show_memory_breakpoints (void *arg)
1691 {
1692 show_memory_breakpoints = (uintptr_t) arg;
1693 }
1694
1695 struct cleanup *
1696 make_show_memory_breakpoints_cleanup (int show)
1697 {
1698 int current = show_memory_breakpoints;
1699
1700 show_memory_breakpoints = show;
1701 return make_cleanup (restore_show_memory_breakpoints,
1702 (void *) (uintptr_t) current);
1703 }
1704
/* For docs see target.h, to_xfer_partial.

   Central dispatch point for partial transfers: routes memory objects
   through the memory-specific code, raw memory through the raw stack
   walker, and everything else straight to OPS's to_xfer_partial.
   Optionally dumps the transfer (and its bytes) when targetdebug is
   set, and asserts that implementations maintain *XFERED_LEN
   correctly.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
                     enum target_object object, const char *annex,
                     gdb_byte *readbuf, const gdb_byte *writebuf,
                     ULONGEST offset, ULONGEST len,
                     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honor the "may-write-memory" setting before touching the target.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
           core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
                                  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
                                        xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
                                   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
                          "%s:target_xfer_partial "
                          "(%d, %s, %s, %s, %s, %s) = %d, %s",
                          ops->to_shortname,
                          (int) object,
                          (annex ? annex : "(null)"),
                          host_address_to_string (readbuf),
                          host_address_to_string (writebuf),
                          core_addr_to_string_nz (offset),
                          pulongest (len), retval,
                          pulongest (*xfered_len));

      if (readbuf)
        myaddr = readbuf;
      if (writebuf)
        myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
        {
          int i;

          fputs_unfiltered (", bytes =", gdb_stdlog);
          for (i = 0; i < *xfered_len; i++)
            {
              /* Break the dump into 16-byte lines; at targetdebug < 2
                 truncate after the first line.  */
              if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
                {
                  if (targetdebug < 2 && i > 0)
                    {
                      fprintf_unfiltered (gdb_stdlog, " ...");
                      break;
                    }
                  fprintf_unfiltered (gdb_stdlog, "\n");
                }

              fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
            }
        }

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1797
1798 /* Read LEN bytes of target memory at address MEMADDR, placing the
1799 results in GDB's memory at MYADDR. Returns either 0 for success or
1800 TARGET_XFER_E_IO if any error occurs.
1801
1802 If an error occurs, no guarantee is made about the contents of the data at
1803 MYADDR. In particular, the caller should not depend upon partial reads
1804 filling the buffer with good data. There is no way for the caller to know
1805 how much good data might have been transfered anyway. Callers that can
1806 deal with partial reads should call target_read (which will retry until
1807 it makes no progress, and then return how much was transferred). */
1808
1809 int
1810 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1811 {
1812 /* Dispatch to the topmost target, not the flattened current_target.
1813 Memory accesses check target->to_has_(all_)memory, and the
1814 flattened target doesn't inherit those. */
1815 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1816 myaddr, memaddr, len) == len)
1817 return 0;
1818 else
1819 return TARGET_XFER_E_IO;
1820 }
1821
1822 /* Like target_read_memory, but specify explicitly that this is a read
1823 from the target's raw memory. That is, this read bypasses the
1824 dcache, breakpoint shadowing, etc. */
1825
1826 int
1827 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1828 {
1829 /* See comment in target_read_memory about why the request starts at
1830 current_target.beneath. */
1831 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1832 myaddr, memaddr, len) == len)
1833 return 0;
1834 else
1835 return TARGET_XFER_E_IO;
1836 }
1837
1838 /* Like target_read_memory, but specify explicitly that this is a read from
1839 the target's stack. This may trigger different cache behavior. */
1840
1841 int
1842 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1843 {
1844 /* See comment in target_read_memory about why the request starts at
1845 current_target.beneath. */
1846 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1847 myaddr, memaddr, len) == len)
1848 return 0;
1849 else
1850 return TARGET_XFER_E_IO;
1851 }
1852
1853 /* Like target_read_memory, but specify explicitly that this is a read from
1854 the target's code. This may trigger different cache behavior. */
1855
1856 int
1857 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1858 {
1859 /* See comment in target_read_memory about why the request starts at
1860 current_target.beneath. */
1861 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1862 myaddr, memaddr, len) == len)
1863 return 0;
1864 else
1865 return TARGET_XFER_E_IO;
1866 }
1867
1868 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1869 Returns either 0 for success or TARGET_XFER_E_IO if any
1870 error occurs. If an error occurs, no guarantee is made about how
1871 much data got written. Callers that can deal with partial writes
1872 should call target_write. */
1873
1874 int
1875 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1876 {
1877 /* See comment in target_read_memory about why the request starts at
1878 current_target.beneath. */
1879 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1880 myaddr, memaddr, len) == len)
1881 return 0;
1882 else
1883 return TARGET_XFER_E_IO;
1884 }
1885
1886 /* Write LEN bytes from MYADDR to target raw memory at address
1887 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1888 if any error occurs. If an error occurs, no guarantee is made
1889 about how much data got written. Callers that can deal with
1890 partial writes should call target_write. */
1891
1892 int
1893 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1894 {
1895 /* See comment in target_read_memory about why the request starts at
1896 current_target.beneath. */
1897 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1898 myaddr, memaddr, len) == len)
1899 return 0;
1900 else
1901 return TARGET_XFER_E_IO;
1902 }
1903
/* Fetch the target's memory map.  Returns a freshly-built vector of
   regions sorted by address and numbered for the "mem" commands, or
   NULL if no target provides a map or the map contains overlapping
   regions.  Ownership of the vector passes to the caller.  */

VEC(mem_region_s) *
target_memory_map (void)
{
  VEC(mem_region_s) *result;
  struct mem_region *last_one, *this_one;
  int ix;
  struct target_ops *t;

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");

  /* Find the first target in the stack that can produce a map.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_memory_map != NULL)
      break;

  if (t == NULL)
    return NULL;

  result = t->to_memory_map (t);
  if (result == NULL)
    return NULL;

  qsort (VEC_address (mem_region_s, result),
         VEC_length (mem_region_s, result),
         sizeof (struct mem_region), mem_region_cmp);

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  last_one = NULL;
  for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
    {
      this_one->number = ix;

      if (last_one && last_one->hi > this_one->lo)
        {
          warning (_("Overlapping regions in memory map: ignoring"));
          VEC_free (mem_region_s, result);
          return NULL;
        }
      last_one = this_one;
    }

  return result;
}
1951
1952 void
1953 target_flash_erase (ULONGEST address, LONGEST length)
1954 {
1955 struct target_ops *t;
1956
1957 for (t = current_target.beneath; t != NULL; t = t->beneath)
1958 if (t->to_flash_erase != NULL)
1959 {
1960 if (targetdebug)
1961 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1962 hex_string (address), phex (length, 0));
1963 t->to_flash_erase (t, address, length);
1964 return;
1965 }
1966
1967 tcomplain ();
1968 }
1969
1970 void
1971 target_flash_done (void)
1972 {
1973 struct target_ops *t;
1974
1975 for (t = current_target.beneath; t != NULL; t = t->beneath)
1976 if (t->to_flash_done != NULL)
1977 {
1978 if (targetdebug)
1979 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1980 t->to_flash_done (t);
1981 return;
1982 }
1983
1984 tcomplain ();
1985 }
1986
/* "show trust-readonly-sections" command callback: report the current
   setting VALUE to FILE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
                     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Mode for reading from readonly sections is %s.\n"),
                    value);
}
1995
1996 /* More generic transfers. */
1997
1998 static enum target_xfer_status
1999 default_xfer_partial (struct target_ops *ops, enum target_object object,
2000 const char *annex, gdb_byte *readbuf,
2001 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
2002 ULONGEST *xfered_len)
2003 {
2004 if (object == TARGET_OBJECT_MEMORY
2005 && ops->deprecated_xfer_memory != NULL)
2006 /* If available, fall back to the target's
2007 "deprecated_xfer_memory" method. */
2008 {
2009 int xfered = -1;
2010
2011 errno = 0;
2012 if (writebuf != NULL)
2013 {
2014 void *buffer = xmalloc (len);
2015 struct cleanup *cleanup = make_cleanup (xfree, buffer);
2016
2017 memcpy (buffer, writebuf, len);
2018 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
2019 1/*write*/, NULL, ops);
2020 do_cleanups (cleanup);
2021 }
2022 if (readbuf != NULL)
2023 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
2024 0/*read*/, NULL, ops);
2025 if (xfered > 0)
2026 {
2027 *xfered_len = (ULONGEST) xfered;
2028 return TARGET_XFER_E_IO;
2029 }
2030 else if (xfered == 0 && errno == 0)
2031 /* "deprecated_xfer_memory" uses 0, cross checked against
2032 ERRNO as one indication of an error. */
2033 return TARGET_XFER_EOF;
2034 else
2035 return TARGET_XFER_E_IO;
2036 }
2037 else
2038 {
2039 gdb_assert (ops->beneath != NULL);
2040 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2041 readbuf, writebuf, offset, len,
2042 xfered_len);
2043 }
2044 }
2045
/* Target vector read/write partial wrapper functions.  */

/* Convenience wrapper: a partial read is a partial transfer with no
   write buffer.  */

static enum target_xfer_status
target_read_partial (struct target_ops *ops,
                     enum target_object object,
                     const char *annex, gdb_byte *buf,
                     ULONGEST offset, ULONGEST len,
                     ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
                              xfered_len);
}
2058
/* Convenience wrapper: a partial write is a partial transfer with no
   read buffer.  */

static enum target_xfer_status
target_write_partial (struct target_ops *ops,
                      enum target_object object,
                      const char *annex, const gdb_byte *buf,
                      ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
                              xfered_len);
}
2068
2069 /* Wrappers to perform the full transfer. */
2070
2071 /* For docs on target_read see target.h. */
2072
2073 LONGEST
2074 target_read (struct target_ops *ops,
2075 enum target_object object,
2076 const char *annex, gdb_byte *buf,
2077 ULONGEST offset, LONGEST len)
2078 {
2079 LONGEST xfered = 0;
2080
2081 while (xfered < len)
2082 {
2083 ULONGEST xfered_len;
2084 enum target_xfer_status status;
2085
2086 status = target_read_partial (ops, object, annex,
2087 (gdb_byte *) buf + xfered,
2088 offset + xfered, len - xfered,
2089 &xfered_len);
2090
2091 /* Call an observer, notifying them of the xfer progress? */
2092 if (status == TARGET_XFER_EOF)
2093 return xfered;
2094 else if (status == TARGET_XFER_OK)
2095 {
2096 xfered += xfered_len;
2097 QUIT;
2098 }
2099 else
2100 return -1;
2101
2102 }
2103 return len;
2104 }
2105
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning) will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning nor at the end -- nothing is
   returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when a memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   which seems an unacceptable solution.  An explicit memory map is
   recommended for this case -- and read_memory_robust will take care
   of reading multiple ranges then.  */
2126
static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;			/* Nonzero: readable prefix; zero: readable suffix.  */
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either the first or the last byte is readable, and give
     up if not.  This heuristic is meant to permit reading accessible
     memory at the boundary of an accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      xfree (buf);
      return;
    }

  /* Binary-search for the readable/unreadable boundary.  Loop
     invariant: the [current_begin, current_end) range was previously
     found to be not readable as a whole.

     Note the loop condition -- if the range has 1 byte, we can't divide
     the range so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* "First half" is always the half adjacent to the already-read
	 byte, i.e. the half that might still be readable.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte,
	     we know some part of this half is actually readable.  Go to
	     the next iteration to divide again and try to read.

	     We don't handle the other half, because this function only
	     tries to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  BUF ownership
	 passes to the result block.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy just that
	 tail into a right-sized block and release BUF.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2240
2241 void
2242 free_memory_read_result_vector (void *x)
2243 {
2244 VEC(memory_read_result_s) *v = x;
2245 memory_read_result_s *current;
2246 int ix;
2247
2248 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2249 {
2250 xfree (current->data);
2251 }
2252 VEC_free (memory_read_result_s, v);
2253 }
2254
/* Read LEN bytes of target memory at OFFSET, splitting the read at
   memory-region boundaries and skipping regions marked unreadable.
   Returns a vector of the blocks that could actually be read; the
   caller owns the vector and each block's data (see
   free_memory_read_result_vector).  */

VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      if (region->hi == 0)
	rlen = len - xfered;	/* Unbounded region: read the rest.  */
      else
	/* NOTE(review): this looks like it should be
	   region->hi - (offset + xfered); as written, RLEN can
	   overshoot the region once XFERED > 0 -- confirm.  */
	rlen = region->hi - offset;

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
	{
	  /* Cannot read this region.  Note that we can end up here only
	     if the region is explicitly marked inaccessible, or
	     'inaccessible-by-default' is in effect.  */
	  xfered += rlen;
	}
      else
	{
	  LONGEST to_read = min (len - xfered, rlen);
	  gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

	  LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
				      (gdb_byte *) buffer,
				      offset + xfered, to_read);
	  /* Call an observer, notifying them of the xfer progress?  */
	  if (xfer <= 0)
	    {
	      /* Got an error reading full chunk.  See if maybe we can read
		 some subrange.  */
	      xfree (buffer);
	      read_whatever_is_readable (ops, offset + xfered,
					 offset + xfered + to_read, &result);
	      xfered += to_read;
	    }
	  else
	    {
	      /* Full or partial success; BUFFER ownership moves into the
		 result vector.  */
	      struct memory_read_result r;
	      r.data = buffer;
	      r.begin = offset + xfered;
	      r.end = r.begin + xfer;
	      VEC_safe_push (memory_read_result_s, result, &r);
	      xfered += xfer;
	    }
	  QUIT;
	}
    }
  return result;
}
2313
2314
2315 /* An alternative to target_write with progress callbacks. */
2316
2317 LONGEST
2318 target_write_with_progress (struct target_ops *ops,
2319 enum target_object object,
2320 const char *annex, const gdb_byte *buf,
2321 ULONGEST offset, LONGEST len,
2322 void (*progress) (ULONGEST, void *), void *baton)
2323 {
2324 LONGEST xfered = 0;
2325
2326 /* Give the progress callback a chance to set up. */
2327 if (progress)
2328 (*progress) (0, baton);
2329
2330 while (xfered < len)
2331 {
2332 ULONGEST xfered_len;
2333 enum target_xfer_status status;
2334
2335 status = target_write_partial (ops, object, annex,
2336 (gdb_byte *) buf + xfered,
2337 offset + xfered, len - xfered,
2338 &xfered_len);
2339
2340 if (status == TARGET_XFER_EOF)
2341 return xfered;
2342 if (TARGET_XFER_STATUS_ERROR_P (status))
2343 return -1;
2344
2345 gdb_assert (status == TARGET_XFER_OK);
2346 if (progress)
2347 (*progress) (xfered_len, baton);
2348
2349 xfered += xfered_len;
2350 QUIT;
2351 }
2352 return len;
2353 }
2354
2355 /* For docs on target_write see target.h. */
2356
2357 LONGEST
2358 target_write (struct target_ops *ops,
2359 enum target_object object,
2360 const char *annex, const gdb_byte *buf,
2361 ULONGEST offset, LONGEST len)
2362 {
2363 return target_write_with_progress (ops, object, annex, buf, offset, len,
2364 NULL, NULL);
2365 }
2366
2367 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2368 the size of the transferred data. PADDING additional bytes are
2369 available in *BUF_P. This is a helper function for
2370 target_read_alloc; see the declaration of that function for more
2371 information. */
2372
static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;	/* Buffer capacity and current fill level.  */
  gdb_byte *buf;

  /* This function does not have a length parameter; it reads the
     entire OBJECT).  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      /* Reserve PADDING bytes of headroom so the caller (e.g.
	 target_read_stralloc) can append a terminator.  */
      status = target_read_partial (ops, object, annex, &buf[buf_pos],
				    buf_pos, buf_alloc - buf_pos - padding,
				    &xfered_len);

      if (status == TARGET_XFER_EOF)
	{
	  /* Read all there was.  Transfer BUF ownership to the caller,
	     unless the object was empty -- then *BUF_P is left unset.  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}
      else if (status != TARGET_XFER_OK)
	{
	  /* An error occurred.  Returns negative TARGET_XFER_E_IO;
	     *BUF_P is not set in this case.  */
	  xfree (buf);
	  return TARGET_XFER_E_IO;
	}

      buf_pos += xfered_len;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
2429
2430 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2431 the size of the transferred data. See the declaration in "target.h"
2432 function for more information about the return value. */
2433
2434 LONGEST
2435 target_read_alloc (struct target_ops *ops, enum target_object object,
2436 const char *annex, gdb_byte **buf_p)
2437 {
2438 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2439 }
2440
2441 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2442 returned as a string, allocated using xmalloc. If an error occurs
2443 or the transfer is unsupported, NULL is returned. Empty objects
2444 are returned as allocated but empty strings. A warning is issued
2445 if the result contains any embedded NUL bytes. */
2446
char *
target_read_stralloc (struct target_ops *ops, enum target_object object,
		      const char *annex)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Request one byte of padding so a NUL terminator can be appended
     below without reallocating.  */
  transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;		/* Read error or unsupported object.  */

  if (transferred == 0)
    return xstrdup ("");	/* Empty object; BUFFER was never set.  */

  /* Safe: target_read_alloc_1 left one byte of headroom.  */
  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target object %d, annex %s, "
		   "contained unexpected null characters"),
		 (int) object, annex ? annex : "(none)");
	break;
      }

  return bufstr;
}
2478
2479 /* Memory transfer methods. */
2480
2481 void
2482 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2483 LONGEST len)
2484 {
2485 /* This method is used to read from an alternate, non-current
2486 target. This read must bypass the overlay support (as symbols
2487 don't match this target), and GDB's internal cache (wrong cache
2488 for this target). */
2489 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2490 != len)
2491 memory_error (TARGET_XFER_E_IO, addr);
2492 }
2493
2494 ULONGEST
2495 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2496 int len, enum bfd_endian byte_order)
2497 {
2498 gdb_byte buf[sizeof (ULONGEST)];
2499
2500 gdb_assert (len <= sizeof (buf));
2501 get_target_memory (ops, addr, buf, len);
2502 return extract_unsigned_integer (buf, len, byte_order);
2503 }
2504
2505 /* See target.h. */
2506
2507 int
2508 target_insert_breakpoint (struct gdbarch *gdbarch,
2509 struct bp_target_info *bp_tgt)
2510 {
2511 if (!may_insert_breakpoints)
2512 {
2513 warning (_("May not insert breakpoints"));
2514 return 1;
2515 }
2516
2517 return current_target.to_insert_breakpoint (&current_target,
2518 gdbarch, bp_tgt);
2519 }
2520
2521 /* See target.h. */
2522
2523 int
2524 target_remove_breakpoint (struct gdbarch *gdbarch,
2525 struct bp_target_info *bp_tgt)
2526 {
2527 /* This is kind of a weird case to handle, but the permission might
2528 have been changed after breakpoints were inserted - in which case
2529 we should just take the user literally and assume that any
2530 breakpoints should be left in place. */
2531 if (!may_insert_breakpoints)
2532 {
2533 warning (_("May not remove breakpoints"));
2534 return 1;
2535 }
2536
2537 return current_target.to_remove_breakpoint (&current_target,
2538 gdbarch, bp_tgt);
2539 }
2540
2541 static void
2542 target_info (char *args, int from_tty)
2543 {
2544 struct target_ops *t;
2545 int has_all_mem = 0;
2546
2547 if (symfile_objfile != NULL)
2548 printf_unfiltered (_("Symbols from \"%s\".\n"),
2549 objfile_name (symfile_objfile));
2550
2551 for (t = target_stack; t != NULL; t = t->beneath)
2552 {
2553 if (!(*t->to_has_memory) (t))
2554 continue;
2555
2556 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2557 continue;
2558 if (has_all_mem)
2559 printf_unfiltered (_("\tWhile running this, "
2560 "GDB does not access memory from...\n"));
2561 printf_unfiltered ("%s:\n", t->to_longname);
2562 (t->to_files_info) (t);
2563 has_all_mem = (*t->to_has_all_memory) (t);
2564 }
2565 }
2566
2567 /* This function is called before any new inferior is created, e.g.
2568 by running a program, attaching, or connecting to a target.
2569 It cleans up any state from previous invocations which might
2570 change between runs. This is a subset of what target_preopen
2571 resets (things which might change between targets). */
2572
2573 void
2574 target_pre_inferior (int from_tty)
2575 {
2576 /* Clear out solib state. Otherwise the solib state of the previous
2577 inferior might have survived and is entirely wrong for the new
2578 target. This has been observed on GNU/Linux using glibc 2.3. How
2579 to reproduce:
2580
2581 bash$ ./foo&
2582 [1] 4711
2583 bash$ ./foo&
2584 [1] 4712
2585 bash$ gdb ./foo
2586 [...]
2587 (gdb) attach 4711
2588 (gdb) detach
2589 (gdb) attach 4712
2590 Cannot access memory at address 0xdeadbeef
2591 */
2592
2593 /* In some OSs, the shared library list is the same/global/shared
2594 across inferiors. If code is shared between processes, so are
2595 memory regions and features. */
2596 if (!gdbarch_has_global_solist (target_gdbarch ()))
2597 {
2598 no_shared_libraries (NULL, from_tty);
2599
2600 invalidate_target_mem_regions ();
2601
2602 target_clear_description ();
2603 }
2604
2605 agent_capability_invalidate ();
2606 }
2607
2608 /* Callback for iterate_over_inferiors. Gets rid of the given
2609 inferior. */
2610
2611 static int
2612 dispose_inferior (struct inferior *inf, void *args)
2613 {
2614 struct thread_info *thread;
2615
2616 thread = any_thread_of_process (inf->pid);
2617 if (thread)
2618 {
2619 switch_to_thread (thread->ptid);
2620
2621 /* Core inferiors actually should be detached, not killed. */
2622 if (target_has_execution)
2623 target_kill ();
2624 else
2625 target_detach (NULL, 0);
2626 }
2627
2628 return 0;
2629 }
2630
2631 /* This is to be called by the open routine before it does
2632 anything. */
2633
2634 void
2635 target_preopen (int from_tty)
2636 {
2637 dont_repeat ();
2638
2639 if (have_inferiors ())
2640 {
2641 if (!from_tty
2642 || !have_live_inferiors ()
2643 || query (_("A program is being debugged already. Kill it? ")))
2644 iterate_over_inferiors (dispose_inferior, NULL);
2645 else
2646 error (_("Program not killed."));
2647 }
2648
2649 /* Calling target_kill may remove the target from the stack. But if
2650 it doesn't (which seems like a win for UDI), remove it now. */
2651 /* Leave the exec target, though. The user may be switching from a
2652 live process to a core of the same program. */
2653 pop_all_targets_above (file_stratum);
2654
2655 target_pre_inferior (from_tty);
2656 }
2657
2658 /* Detach a target after doing deferred register stores. */
2659
2660 void
2661 target_detach (const char *args, int from_tty)
2662 {
2663 struct target_ops* t;
2664
2665 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2666 /* Don't remove global breakpoints here. They're removed on
2667 disconnection from the target. */
2668 ;
2669 else
2670 /* If we're in breakpoints-always-inserted mode, have to remove
2671 them before detaching. */
2672 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2673
2674 prepare_for_detach ();
2675
2676 current_target.to_detach (&current_target, args, from_tty);
2677 if (targetdebug)
2678 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2679 args, from_tty);
2680 }
2681
2682 void
2683 target_disconnect (char *args, int from_tty)
2684 {
2685 struct target_ops *t;
2686
2687 /* If we're in breakpoints-always-inserted mode or if breakpoints
2688 are global across processes, we have to remove them before
2689 disconnecting. */
2690 remove_breakpoints ();
2691
2692 for (t = current_target.beneath; t != NULL; t = t->beneath)
2693 if (t->to_disconnect != NULL)
2694 {
2695 if (targetdebug)
2696 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2697 args, from_tty);
2698 t->to_disconnect (t, args, from_tty);
2699 return;
2700 }
2701
2702 tcomplain ();
2703 }
2704
2705 ptid_t
2706 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2707 {
2708 struct target_ops *t;
2709 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2710 status, options);
2711
2712 if (targetdebug)
2713 {
2714 char *status_string;
2715 char *options_string;
2716
2717 status_string = target_waitstatus_to_string (status);
2718 options_string = target_options_to_string (options);
2719 fprintf_unfiltered (gdb_stdlog,
2720 "target_wait (%d, status, options={%s})"
2721 " = %d, %s\n",
2722 ptid_get_pid (ptid), options_string,
2723 ptid_get_pid (retval), status_string);
2724 xfree (status_string);
2725 xfree (options_string);
2726 }
2727
2728 return retval;
2729 }
2730
2731 char *
2732 target_pid_to_str (ptid_t ptid)
2733 {
2734 struct target_ops *t;
2735
2736 for (t = current_target.beneath; t != NULL; t = t->beneath)
2737 {
2738 if (t->to_pid_to_str != NULL)
2739 return (*t->to_pid_to_str) (t, ptid);
2740 }
2741
2742 return normal_pid_to_str (ptid);
2743 }
2744
2745 char *
2746 target_thread_name (struct thread_info *info)
2747 {
2748 struct target_ops *t;
2749
2750 for (t = current_target.beneath; t != NULL; t = t->beneath)
2751 {
2752 if (t->to_thread_name != NULL)
2753 return (*t->to_thread_name) (t, info);
2754 }
2755
2756 return NULL;
2757 }
2758
2759 void
2760 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2761 {
2762 struct target_ops *t;
2763
2764 target_dcache_invalidate ();
2765
2766 current_target.to_resume (&current_target, ptid, step, signal);
2767 if (targetdebug)
2768 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2769 ptid_get_pid (ptid),
2770 step ? "step" : "continue",
2771 gdb_signal_to_name (signal));
2772
2773 registers_changed_ptid (ptid);
2774 set_executing (ptid, 1);
2775 set_running (ptid, 1);
2776 clear_inline_frame_state (ptid);
2777 }
2778
2779 void
2780 target_pass_signals (int numsigs, unsigned char *pass_signals)
2781 {
2782 struct target_ops *t;
2783
2784 for (t = current_target.beneath; t != NULL; t = t->beneath)
2785 {
2786 if (t->to_pass_signals != NULL)
2787 {
2788 if (targetdebug)
2789 {
2790 int i;
2791
2792 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2793 numsigs);
2794
2795 for (i = 0; i < numsigs; i++)
2796 if (pass_signals[i])
2797 fprintf_unfiltered (gdb_stdlog, " %s",
2798 gdb_signal_to_name (i));
2799
2800 fprintf_unfiltered (gdb_stdlog, " })\n");
2801 }
2802
2803 (*t->to_pass_signals) (t, numsigs, pass_signals);
2804 return;
2805 }
2806 }
2807 }
2808
2809 void
2810 target_program_signals (int numsigs, unsigned char *program_signals)
2811 {
2812 struct target_ops *t;
2813
2814 for (t = current_target.beneath; t != NULL; t = t->beneath)
2815 {
2816 if (t->to_program_signals != NULL)
2817 {
2818 if (targetdebug)
2819 {
2820 int i;
2821
2822 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2823 numsigs);
2824
2825 for (i = 0; i < numsigs; i++)
2826 if (program_signals[i])
2827 fprintf_unfiltered (gdb_stdlog, " %s",
2828 gdb_signal_to_name (i));
2829
2830 fprintf_unfiltered (gdb_stdlog, " })\n");
2831 }
2832
2833 (*t->to_program_signals) (t, numsigs, program_signals);
2834 return;
2835 }
2836 }
2837 }
2838
2839 /* Look through the list of possible targets for a target that can
2840 follow forks. */
2841
2842 int
2843 target_follow_fork (int follow_child, int detach_fork)
2844 {
2845 struct target_ops *t;
2846
2847 for (t = current_target.beneath; t != NULL; t = t->beneath)
2848 {
2849 if (t->to_follow_fork != NULL)
2850 {
2851 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2852
2853 if (targetdebug)
2854 fprintf_unfiltered (gdb_stdlog,
2855 "target_follow_fork (%d, %d) = %d\n",
2856 follow_child, detach_fork, retval);
2857 return retval;
2858 }
2859 }
2860
2861 /* Some target returned a fork event, but did not know how to follow it. */
2862 internal_error (__FILE__, __LINE__,
2863 _("could not find a target to follow fork"));
2864 }
2865
2866 void
2867 target_mourn_inferior (void)
2868 {
2869 struct target_ops *t;
2870
2871 for (t = current_target.beneath; t != NULL; t = t->beneath)
2872 {
2873 if (t->to_mourn_inferior != NULL)
2874 {
2875 t->to_mourn_inferior (t);
2876 if (targetdebug)
2877 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2878
2879 /* We no longer need to keep handles on any of the object files.
2880 Make sure to release them to avoid unnecessarily locking any
2881 of them while we're not actually debugging. */
2882 bfd_cache_close_all ();
2883
2884 return;
2885 }
2886 }
2887
2888 internal_error (__FILE__, __LINE__,
2889 _("could not find a target to follow mourn inferior"));
2890 }
2891
2892 /* Look for a target which can describe architectural features, starting
2893 from TARGET. If we find one, return its description. */
2894
2895 const struct target_desc *
2896 target_read_description (struct target_ops *target)
2897 {
2898 struct target_ops *t;
2899
2900 for (t = target; t != NULL; t = t->beneath)
2901 if (t->to_read_description != NULL)
2902 {
2903 const struct target_desc *tdesc;
2904
2905 tdesc = t->to_read_description (t);
2906 if (tdesc)
2907 return tdesc;
2908 }
2909
2910 return NULL;
2911 }
2912
2913 /* The default implementation of to_search_memory.
2914 This implements a basic search of memory, reading target memory and
2915 performing the search here (as opposed to performing the search in on the
2916 target side with, for example, gdbserver). */
2917
int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* PATTERN_LEN - 1 bytes of overlap let a match that straddles two
     chunks be found within a single memmem call.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  /* NOTE(review): plain malloc rather than the file's usual xmalloc;
     presumably deliberate so an over-large request yields the friendly
     error below -- confirm before "fixing".  */
  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      /* NOTE(review): the earlier warning uses pulongest; this
		 one uses plongest on an int -- harmless but
		 inconsistent.  */
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
3020
3021 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
3022 sequence of bytes in PATTERN with length PATTERN_LEN.
3023
3024 The result is 1 if found, 0 if not found, and -1 if there was an error
3025 requiring halting of the search (e.g. memory read error).
3026 If the pattern is found the address is recorded in FOUND_ADDRP. */
3027
3028 int
3029 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
3030 const gdb_byte *pattern, ULONGEST pattern_len,
3031 CORE_ADDR *found_addrp)
3032 {
3033 struct target_ops *t;
3034 int found;
3035
3036 /* We don't use INHERIT to set current_target.to_search_memory,
3037 so we have to scan the target stack and handle targetdebug
3038 ourselves. */
3039
3040 if (targetdebug)
3041 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
3042 hex_string (start_addr));
3043
3044 for (t = current_target.beneath; t != NULL; t = t->beneath)
3045 if (t->to_search_memory != NULL)
3046 break;
3047
3048 if (t != NULL)
3049 {
3050 found = t->to_search_memory (t, start_addr, search_space_len,
3051 pattern, pattern_len, found_addrp);
3052 }
3053 else
3054 {
3055 /* If a special version of to_search_memory isn't available, use the
3056 simple version. */
3057 found = simple_search_memory (current_target.beneath,
3058 start_addr, search_space_len,
3059 pattern, pattern_len, found_addrp);
3060 }
3061
3062 if (targetdebug)
3063 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3064
3065 return found;
3066 }
3067
3068 /* Look through the currently pushed targets. If none of them will
3069 be able to restart the currently running process, issue an error
3070 message. */
3071
3072 void
3073 target_require_runnable (void)
3074 {
3075 struct target_ops *t;
3076
3077 for (t = target_stack; t != NULL; t = t->beneath)
3078 {
3079 /* If this target knows how to create a new program, then
3080 assume we will still be able to after killing the current
3081 one. Either killing and mourning will not pop T, or else
3082 find_default_run_target will find it again. */
3083 if (t->to_create_inferior != NULL)
3084 return;
3085
3086 /* Do not worry about thread_stratum targets that can not
3087 create inferiors. Assume they will be pushed again if
3088 necessary, and continue to the process_stratum. */
3089 if (t->to_stratum == thread_stratum
3090 || t->to_stratum == arch_stratum)
3091 continue;
3092
3093 error (_("The \"%s\" target does not support \"run\". "
3094 "Try \"help target\" or \"continue\"."),
3095 t->to_shortname);
3096 }
3097
3098 /* This function is only called if the target is running. In that
3099 case there should have been a process_stratum target and it
3100 should either know how to create inferiors, or not... */
3101 internal_error (__FILE__, __LINE__, _("No targets found"));
3102 }
3103
3104 /* Look through the list of possible targets for a target that can
3105 execute a run or attach command without any other data. This is
3106 used to locate the default process stratum.
3107
3108 If DO_MESG is not NULL, the result is always valid (error() is
3109 called for errors); else, return NULL on error. */
3110
3111 static struct target_ops *
3112 find_default_run_target (char *do_mesg)
3113 {
3114 struct target_ops **t;
3115 struct target_ops *runable = NULL;
3116 int count;
3117
3118 count = 0;
3119
3120 for (t = target_structs; t < target_structs + target_struct_size;
3121 ++t)
3122 {
3123 if ((*t)->to_can_run && target_can_run (*t))
3124 {
3125 runable = *t;
3126 ++count;
3127 }
3128 }
3129
3130 if (count != 1)
3131 {
3132 if (do_mesg)
3133 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3134 else
3135 return NULL;
3136 }
3137
3138 return runable;
3139 }
3140
3141 void
3142 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3143 {
3144 struct target_ops *t;
3145
3146 t = find_default_run_target ("attach");
3147 (t->to_attach) (t, args, from_tty);
3148 return;
3149 }
3150
3151 void
3152 find_default_create_inferior (struct target_ops *ops,
3153 char *exec_file, char *allargs, char **env,
3154 int from_tty)
3155 {
3156 struct target_ops *t;
3157
3158 t = find_default_run_target ("run");
3159 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3160 return;
3161 }
3162
/* to_can_async_p stub used before any process target is pushed.  */

static int
find_default_can_async_p (struct target_ops *ignore)
{
  struct target_ops *t;

  /* This may be called before the target is pushed on the stack;
     look for the default process stratum.  If there's none, gdb isn't
     configured with a native debugger, and target remote isn't
     connected yet.  */
  t = find_default_run_target (NULL);
  /* Only forward if the target supplies its own hook; calling the
     delegator would bounce right back here.  */
  if (t && t->to_can_async_p != delegate_can_async_p)
    return (t->to_can_async_p) (t);
  return 0;
}

/* to_is_async_p stub used before any process target is pushed.  */

static int
find_default_is_async_p (struct target_ops *ignore)
{
  struct target_ops *t;

  /* This may be called before the target is pushed on the stack;
     look for the default process stratum.  If there's none, gdb isn't
     configured with a native debugger, and target remote isn't
     connected yet.  */
  t = find_default_run_target (NULL);
  /* Same delegator guard as find_default_can_async_p above.  */
  if (t && t->to_is_async_p != delegate_is_async_p)
    return (t->to_is_async_p) (t);
  return 0;
}
3192
/* to_supports_non_stop stub used before any target is pushed: query
   the default run target, if there is one.  */

static int
find_default_supports_non_stop (struct target_ops *self)
{
  struct target_ops *t;

  t = find_default_run_target (NULL);
  if (t && t->to_supports_non_stop)
    return (t->to_supports_non_stop) (t);
  return 0;
}

/* Return non-zero if some target on the current stack supports
   non-stop mode.  */

int
target_supports_non_stop (void)
{
  struct target_ops *t;

  for (t = &current_target; t != NULL; t = t->beneath)
    if (t->to_supports_non_stop)
      return t->to_supports_non_stop (t);

  return 0;
}
3215
3216 /* Implement the "info proc" command. */
3217
3218 int
3219 target_info_proc (char *args, enum info_proc_what what)
3220 {
3221 struct target_ops *t;
3222
3223 /* If we're already connected to something that can get us OS
3224 related data, use it. Otherwise, try using the native
3225 target. */
3226 if (current_target.to_stratum >= process_stratum)
3227 t = current_target.beneath;
3228 else
3229 t = find_default_run_target (NULL);
3230
3231 for (; t != NULL; t = t->beneath)
3232 {
3233 if (t->to_info_proc != NULL)
3234 {
3235 t->to_info_proc (t, args, what);
3236
3237 if (targetdebug)
3238 fprintf_unfiltered (gdb_stdlog,
3239 "target_info_proc (\"%s\", %d)\n", args, what);
3240
3241 return 1;
3242 }
3243 }
3244
3245 return 0;
3246 }
3247
/* to_supports_disable_randomization stub used before any target is
   pushed: query the default run target, if there is one.  */

static int
find_default_supports_disable_randomization (struct target_ops *self)
{
  struct target_ops *t;

  t = find_default_run_target (NULL);
  if (t && t->to_supports_disable_randomization)
    return (t->to_supports_disable_randomization) (t);
  return 0;
}

/* Return non-zero if some target on the current stack supports
   disabling address space randomization.  */

int
target_supports_disable_randomization (void)
{
  struct target_ops *t;

  for (t = &current_target; t != NULL; t = t->beneath)
    if (t->to_supports_disable_randomization)
      return t->to_supports_disable_randomization (t);

  return 0;
}
3270
3271 char *
3272 target_get_osdata (const char *type)
3273 {
3274 struct target_ops *t;
3275
3276 /* If we're already connected to something that can get us OS
3277 related data, use it. Otherwise, try using the native
3278 target. */
3279 if (current_target.to_stratum >= process_stratum)
3280 t = current_target.beneath;
3281 else
3282 t = find_default_run_target ("get OS data");
3283
3284 if (!t)
3285 return NULL;
3286
3287 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3288 }
3289
3290 /* Determine the current address space of thread PTID. */
3291
3292 struct address_space *
3293 target_thread_address_space (ptid_t ptid)
3294 {
3295 struct address_space *aspace;
3296 struct inferior *inf;
3297 struct target_ops *t;
3298
3299 for (t = current_target.beneath; t != NULL; t = t->beneath)
3300 {
3301 if (t->to_thread_address_space != NULL)
3302 {
3303 aspace = t->to_thread_address_space (t, ptid);
3304 gdb_assert (aspace);
3305
3306 if (targetdebug)
3307 fprintf_unfiltered (gdb_stdlog,
3308 "target_thread_address_space (%s) = %d\n",
3309 target_pid_to_str (ptid),
3310 address_space_num (aspace));
3311 return aspace;
3312 }
3313 }
3314
3315 /* Fall-back to the "main" address space of the inferior. */
3316 inf = find_inferior_pid (ptid_get_pid (ptid));
3317
3318 if (inf == NULL || inf->aspace == NULL)
3319 internal_error (__FILE__, __LINE__,
3320 _("Can't determine the current "
3321 "address space of thread %s\n"),
3322 target_pid_to_str (ptid));
3323
3324 return inf->aspace;
3325 }
3326
3327
/* Target file operations.  */

/* Return the target whose stack the target_fileio_* wrappers below
   should walk when looking for a file-I/O implementation.  */

static struct target_ops *
default_fileio_target (void)
{
  /* If we're already connected to something that can perform
     file I/O, use it.  Otherwise, try using the native target.  */
  if (current_target.to_stratum >= process_stratum)
    return current_target.beneath;
  else
    return find_default_run_target ("file I/O");
}
3340
/* Open FILENAME on the target, using FLAGS and MODE.  Return a
   target file descriptor, or -1 if an error occurs (and set
   *TARGET_ERRNO).  */
int
target_fileio_open (const char *filename, int flags, int mode,
		    int *target_errno)
{
  struct target_ops *t;

  /* Walk the stack from the default file-I/O target downward; the
     first target implementing the hook handles the request.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_open != NULL)
	{
	  int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
				filename, flags, mode,
				fd, fd != -1 ? 0 : *target_errno);
	  return fd;
	}
    }

  /* No target in the stack supports file I/O at all.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Write up to LEN bytes from WRITE_BUF to FD on the target.
   Return the number of bytes written, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
int
target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
		      ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pwrite != NULL)
	{
	  int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
					 target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pwrite (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Read up to LEN bytes FD on the target into READ_BUF.
   Return the number of bytes read, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
int
target_fileio_pread (int fd, gdb_byte *read_buf, int len,
		     ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pread != NULL)
	{
	  int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
					target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pread (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Close FD on the target.  Return 0, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
int
target_fileio_close (int fd, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_close != NULL)
	{
	  int ret = t->to_fileio_close (t, fd, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_close (%d) = %d (%d)\n",
				fd, ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Unlink FILENAME on the target.  Return 0, or -1 if an error
   occurs (and set *TARGET_ERRNO).  */
int
target_fileio_unlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_unlink != NULL)
	{
	  int ret = t->to_fileio_unlink (t, filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_unlink (%s) = %d (%d)\n",
				filename, ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Read value of symbolic link FILENAME on the target.  Return a
   null-terminated string allocated via xmalloc, or NULL if an error
   occurs (and set *TARGET_ERRNO).  */
char *
target_fileio_readlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_readlink != NULL)
	{
	  char *ret = t->to_fileio_readlink (t, filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_readlink (%s) = %s (%d)\n",
				filename, ret? ret : "(nil)",
				ret? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return NULL;
}

/* Cleanup callback: close the target fd pointed to by OPAQUE,
   discarding any close error.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int fd = *(int *) opaque;
  int target_errno;

  target_fileio_close (fd, &target_errno);
}
3514
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  /* Read-only open; the 0700 mode only matters if a target's open
     were ever to create the file.  */
  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Guarantee the fd is closed on every exit path, including
     exceptions thrown while reading.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* The request size leaves PADDING bytes of slack at the end of
	 the buffer for the caller (e.g. for a NUL terminator).  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  Note that for an empty file *BUF_P
	     is never set; callers must check the returned size before
	     touching it.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
3578
/* Read target file FILENAME.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.  */

LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}

/* Read target file FILENAME.  The result is NUL-terminated and
   returned as a string, allocated using xmalloc.  If an error occurs
   or the transfer is unsupported, NULL is returned.  Empty objects
   are returned as allocated but empty strings.  A warning is issued
   if the result contains any embedded NUL bytes.  */

char *
target_fileio_read_stralloc (const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Padding of 1 guarantees room for the terminating NUL appended
     below.  */
  transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  /* BUFFER is not set when nothing was transferred; return a fresh
     empty string instead.  */
  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target file %s "
		   "contained unexpected null characters"),
		 filename);
	break;
      }

  return bufstr;
}
3625
3626
/* Default to_region_ok_for_hw_watchpoint: accept regions no wider
   than a pointer on the target architecture.  */

static int
default_region_ok_for_hw_watchpoint (struct target_ops *self,
				     CORE_ADDR addr, int len)
{
  return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
}

/* Default to_watchpoint_addr_within_range: a plain half-open interval
   test [START, START + LENGTH).  */

static int
default_watchpoint_addr_within_range (struct target_ops *target,
				      CORE_ADDR addr,
				      CORE_ADDR start, int length)
{
  return addr >= start && addr < start + length;
}

/* Default to_thread_architecture: every thread shares the inferior's
   global architecture.  */

static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch ();
}
3647
/* Trivial constant-returning stubs, installed into target-vector
   slots elsewhere in this file (e.g. init_dummy_target).  */

static int
return_zero (void)
{
  return 0;
}

static int
return_one (void)
{
  return 1;
}

static int
return_minus_one (void)
{
  return -1;
}

static void *
return_null (void)
{
  return 0;
}
3671
/*
 * Find the next target down the stack from the specified target.
 */

struct target_ops *
find_target_beneath (struct target_ops *t)
{
  return t->beneath;
}

/* See target.h.  */

/* Return the first target on the current stack at STRATUM, or NULL if
   no pushed target lives at that stratum.  */

struct target_ops *
find_target_at (enum strata stratum)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_stratum == stratum)
      return t;

  return NULL;
}
3695
3696 \f
/* The inferior process has died.  Long live the inferior!  */

/* Shared mourning routine: forget the inferior's threads,
   breakpoints, registers, and frame state.  The statement order below
   is deliberate; see the inline comments.  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Clear inferior_ptid first so nothing below operates on the dead
     process by accident.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3731 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  /* Static buffer: the result is only valid until the next call, and
     this function is not reentrant.  */
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}

/* to_pid_to_str implementation for the dummy target: fall back on the
   plain "process N" form.  */

static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3749
/* Error-catcher for target_find_memory_regions.  */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Not reached: error () throws.  */
  return 0;
}

/* Error-catcher for target_make_corefile_notes.  */
static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Not reached: error () throws.  */
  return NULL;
}

/* Error-catcher for target_get_bookmark.  */
static gdb_byte *
dummy_get_bookmark (struct target_ops *self, char *ignore1, int ignore2)
{
  tcomplain ();
  /* Not reached: tcomplain () throws.  */
  return NULL;
}

/* Error-catcher for target_goto_bookmark.  */
static void
dummy_goto_bookmark (struct target_ops *self, gdb_byte *ignore, int from_tty)
{
  tcomplain ();
}
3782
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  dummy_target.to_find_memory_regions = dummy_find_memory_regions;
  dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
  dummy_target.to_get_bookmark = dummy_get_bookmark;
  dummy_target.to_goto_bookmark = dummy_goto_bookmark;
  /* NOTE(review): the casts below install functions whose actual
     signatures do not match the slot types; calling through a
     mismatched function-pointer type is technically undefined
     behavior -- confirm whether these should be proper per-slot
     stubs.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  /* Fill every remaining slot with a default delegating method.  */
  install_dummy_methods (&dummy_target);
}
3812 \f
/* Debug-shim for to_open: forward to the real target, then log the
   call.  */

static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  /* NOTE(review): ARGS may plausibly be NULL here; printing NULL via
     %s is undefined behavior -- confirm against the callers.  */
  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}
3820
3821 void
3822 target_close (struct target_ops *targ)
3823 {
3824 gdb_assert (!target_is_pushed (targ));
3825
3826 if (targ->to_xclose != NULL)
3827 targ->to_xclose (targ);
3828 else if (targ->to_close != NULL)
3829 targ->to_close (targ);
3830
3831 if (targetdebug)
3832 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3833 }
3834
/* Attach to the process described by ARGS via the current target
   vector, logging the call when target debugging is enabled.  */

void
target_attach (char *args, int from_tty)
{
  current_target.to_attach (&current_target, args, from_tty);
  if (targetdebug)
    /* NOTE(review): ARGS may be NULL; %s with NULL is undefined
       behavior -- confirm against the callers.  */
    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
			args, from_tty);
}
3843
3844 int
3845 target_thread_alive (ptid_t ptid)
3846 {
3847 struct target_ops *t;
3848
3849 for (t = current_target.beneath; t != NULL; t = t->beneath)
3850 {
3851 if (t->to_thread_alive != NULL)
3852 {
3853 int retval;
3854
3855 retval = t->to_thread_alive (t, ptid);
3856 if (targetdebug)
3857 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3858 ptid_get_pid (ptid), retval);
3859
3860 return retval;
3861 }
3862 }
3863
3864 return 0;
3865 }
3866
/* Ask the first target on the stack that implements the hook to look
   for new threads; no-op if none does.  */

void
target_find_new_threads (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_find_new_threads != NULL)
	{
	  t->to_find_new_threads (t);
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");

	  return;
	}
    }
}

/* Stop thread PTID, honoring the global "may-stop" user setting.  */

void
target_stop (ptid_t ptid)
{
  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  (*current_target.to_stop) (&current_target, ptid);
}
3896
/* Debug-shim for to_post_attach: forward to the real target, then log
   the call.  */

static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}

/* Concatenate ELEM to LIST, a comma separate list, and return the
   result.  The LIST incoming argument is released.  */

static char *
str_comma_list_concat_elem (char *list, const char *elem)
{
  if (list == NULL)
    return xstrdup (elem);
  else
    return reconcat (list, list, ", ", elem, (char *) NULL);
}

/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  if ((*target_options & opt) != 0)
    {
      ret = str_comma_list_concat_elem (ret, opt_str);
      *target_options &= ~opt;
    }

  return ret;
}

/* Return a freshly-allocated, comma-separated string naming the
   option bits set in TARGET_OPTIONS; the caller frees it.  */

char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  /* Any bits still set at this point are unknown to this function.  */
  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3952
/* Emit a debug-log line naming FUNC and showing register REGNO's name
   and current raw contents from REGCACHE.  */

static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the register's name when it has a non-empty one.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      /* Dump the raw bytes in storage order.  */
      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* When the register fits in a LONGEST, also show it as an
	 address-style and a decimal value.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3989
3990 void
3991 target_fetch_registers (struct regcache *regcache, int regno)
3992 {
3993 struct target_ops *t;
3994
3995 for (t = current_target.beneath; t != NULL; t = t->beneath)
3996 {
3997 if (t->to_fetch_registers != NULL)
3998 {
3999 t->to_fetch_registers (t, regcache, regno);
4000 if (targetdebug)
4001 debug_print_register ("target_fetch_registers", regcache, regno);
4002 return;
4003 }
4004 }
4005 }
4006
4007 void
4008 target_store_registers (struct regcache *regcache, int regno)
4009 {
4010 struct target_ops *t;
4011
4012 if (!may_write_registers)
4013 error (_("Writing to registers is not allowed (regno %d)"), regno);
4014
4015 current_target.to_store_registers (&current_target, regcache, regno);
4016 if (targetdebug)
4017 {
4018 debug_print_register ("target_store_registers", regcache, regno);
4019 }
4020 }
4021
/* Return the core thread PTID last ran on, or -1 if no target on the
   stack can tell.  */

int
target_core_of_thread (ptid_t ptid)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_core_of_thread != NULL)
	{
	  int retval = t->to_core_of_thread (t, ptid);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_core_of_thread (%d) = %d\n",
				ptid_get_pid (ptid), retval);
	  return retval;
	}
    }

  return -1;
}
4043
4044 int
4045 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4046 {
4047 struct target_ops *t;
4048
4049 for (t = current_target.beneath; t != NULL; t = t->beneath)
4050 {
4051 if (t->to_verify_memory != NULL)
4052 {
4053 int retval = t->to_verify_memory (t, data, memaddr, size);
4054
4055 if (targetdebug)
4056 fprintf_unfiltered (gdb_stdlog,
4057 "target_verify_memory (%s, %s) = %d\n",
4058 paddress (target_gdbarch (), memaddr),
4059 pulongest (size),
4060 retval);
4061 return retval;
4062 }
4063 }
4064
4065 tcomplain ();
4066 }
4067
/* The documentation for this function is in its prototype declaration in
   target.h.  */

int
target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insert_mask_watchpoint != NULL)
      {
	int ret;

	ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);

	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "\
target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
			      core_addr_to_string (addr),
			      core_addr_to_string (mask), rw, ret);

	return ret;
      }

  /* NOTE(review): the non-zero fallback appears to mean
     "unsupported/failed" -- confirm against the contract in
     target.h.  */
  return 1;
}

/* The documentation for this function is in its prototype declaration in
   target.h.  */

int
target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_remove_mask_watchpoint != NULL)
      {
	int ret;

	ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);

	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "\
target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
			      core_addr_to_string (addr),
			      core_addr_to_string (mask), rw, ret);

	return ret;
      }

  /* Same fallback convention as target_insert_mask_watchpoint.  */
  return 1;
}

/* The documentation for this function is in its prototype declaration
   in target.h.  */

int
target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_masked_watch_num_registers != NULL)
      return t->to_masked_watch_num_registers (t, addr, mask);

  /* -1: no target supports masked watchpoints.  */
  return -1;
}

/* The documentation for this function is in its prototype declaration
   in target.h.  */

int
target_ranged_break_num_registers (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_ranged_break_num_registers != NULL)
      return t->to_ranged_break_num_registers (t);

  /* -1: no target supports ranged breakpoints.  */
  return -1;
}
4151
/* See target.h.  */

/* Enable branch tracing for PTID; complains (throws) if no target on
   the stack supports branch tracing.  */

struct btrace_target_info *
target_enable_btrace (ptid_t ptid)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_enable_btrace != NULL)
      return t->to_enable_btrace (t, ptid);

  tcomplain ();
  /* Not reached: tcomplain throws.  */
  return NULL;
}

/* See target.h.  */

void
target_disable_btrace (struct btrace_target_info *btinfo)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_disable_btrace != NULL)
      {
	t->to_disable_btrace (t, btinfo);
	return;
      }

  tcomplain ();
}

/* See target.h.  */

void
target_teardown_btrace (struct btrace_target_info *btinfo)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_teardown_btrace != NULL)
      {
	t->to_teardown_btrace (t, btinfo);
	return;
      }

  tcomplain ();
}

/* See target.h.  */

enum btrace_error
target_read_btrace (VEC (btrace_block_s) **btrace,
		    struct btrace_target_info *btinfo,
		    enum btrace_read_type type)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_read_btrace != NULL)
      return t->to_read_btrace (t, btrace, btinfo, type);

  tcomplain ();
  /* Not reached: tcomplain throws.  */
  return BTRACE_ERR_NOT_SUPPORTED;
}
4217
/* See target.h.  */

/* Stop recording; silently a no-op when no target implements the
   hook, unlike its siblings below which complain.  */

void
target_stop_recording (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_stop_recording != NULL)
      {
	t->to_stop_recording (t);
	return;
      }

  /* This is optional.  */
}

/* See target.h.  */

void
target_info_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_info_record != NULL)
      {
	t->to_info_record (t);
	return;
      }

  tcomplain ();
}

/* See target.h.  */

void
target_save_record (const char *filename)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_save_record != NULL)
      {
	t->to_save_record (t, filename);
	return;
      }

  tcomplain ();
}

/* See target.h.  */

/* Return non-zero if some target on the stack implements
   to_delete_record; unlike target_delete_record, this only probes.  */

int
target_supports_delete_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_delete_record != NULL)
      return 1;

  return 0;
}

/* See target.h.  */

void
target_delete_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_delete_record != NULL)
      {
	t->to_delete_record (t);
	return;
      }

  tcomplain ();
}

/* See target.h.  */

int
target_record_is_replaying (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_record_is_replaying != NULL)
      return t->to_record_is_replaying (t);

  return 0;
}

/* See target.h.  */

void
target_goto_record_begin (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record_begin != NULL)
      {
	t->to_goto_record_begin (t);
	return;
      }

  tcomplain ();
}

/* See target.h.  */

void
target_goto_record_end (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record_end != NULL)
      {
	t->to_goto_record_end (t);
	return;
      }

  tcomplain ();
}

/* See target.h.  */

void
target_goto_record (ULONGEST insn)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record != NULL)
      {
	t->to_goto_record (t, insn);
	return;
      }

  tcomplain ();
}
4364
/* See target.h.  */

/* Each of the six history wrappers below delegates to the first
   target on the stack implementing the corresponding hook, and
   complains (throws) when none does.  */

void
target_insn_history (int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history != NULL)
      {
	t->to_insn_history (t, size, flags);
	return;
      }

  tcomplain ();
}

/* See target.h.  */

void
target_insn_history_from (ULONGEST from, int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history_from != NULL)
      {
	t->to_insn_history_from (t, from, size, flags);
	return;
      }

  tcomplain ();
}

/* See target.h.  */

void
target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history_range != NULL)
      {
	t->to_insn_history_range (t, begin, end, flags);
	return;
      }

  tcomplain ();
}

/* See target.h.  */

void
target_call_history (int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history != NULL)
      {
	t->to_call_history (t, size, flags);
	return;
      }

  tcomplain ();
}

/* See target.h.  */

void
target_call_history_from (ULONGEST begin, int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history_from != NULL)
      {
	t->to_call_history_from (t, begin, size, flags);
	return;
      }

  tcomplain ();
}

/* See target.h.  */

void
target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history_range != NULL)
      {
	t->to_call_history_range (t, begin, end, flags);
	return;
      }

  tcomplain ();
}
4466
4467 static void
4468 debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
4469 {
4470 debug_target.to_prepare_to_store (&debug_target, regcache);
4471
4472 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4473 }
4474
4475 /* See target.h. */
4476
4477 const struct frame_unwind *
4478 target_get_unwinder (void)
4479 {
4480 struct target_ops *t;
4481
4482 for (t = current_target.beneath; t != NULL; t = t->beneath)
4483 if (t->to_get_unwinder != NULL)
4484 return t->to_get_unwinder;
4485
4486 return NULL;
4487 }
4488
4489 /* See target.h. */
4490
4491 const struct frame_unwind *
4492 target_get_tailcall_unwinder (void)
4493 {
4494 struct target_ops *t;
4495
4496 for (t = current_target.beneath; t != NULL; t = t->beneath)
4497 if (t->to_get_tailcall_unwinder != NULL)
4498 return t->to_get_tailcall_unwinder;
4499
4500 return NULL;
4501 }
4502
4503 /* See target.h. */
4504
4505 CORE_ADDR
4506 forward_target_decr_pc_after_break (struct target_ops *ops,
4507 struct gdbarch *gdbarch)
4508 {
4509 for (; ops != NULL; ops = ops->beneath)
4510 if (ops->to_decr_pc_after_break != NULL)
4511 return ops->to_decr_pc_after_break (ops, gdbarch);
4512
4513 return gdbarch_decr_pc_after_break (gdbarch);
4514 }
4515
4516 /* See target.h. */
4517
4518 CORE_ADDR
4519 target_decr_pc_after_break (struct gdbarch *gdbarch)
4520 {
4521 return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
4522 }
4523
/* Debug wrapper around the deprecated memory-transfer method: forward
   the request to the real target saved in DEBUG_TARGET, then log the
   call, its result, and (depending on the debug level) the bytes that
   were moved.  Returns the number of bytes transferred, as reported
   by the underlying method.  */

static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the hex dump into lines, keyed off the low bits of
	     the buffer address (roughly every 16 bytes).  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* At "set debug target 1" print only the first line of
		 the dump; higher levels dump everything.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4564
4565 static void
4566 debug_to_files_info (struct target_ops *target)
4567 {
4568 debug_target.to_files_info (target);
4569
4570 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4571 }
4572
4573 static int
4574 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4575 struct bp_target_info *bp_tgt)
4576 {
4577 int retval;
4578
4579 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
4580
4581 fprintf_unfiltered (gdb_stdlog,
4582 "target_insert_breakpoint (%s, xxx) = %ld\n",
4583 core_addr_to_string (bp_tgt->placed_address),
4584 (unsigned long) retval);
4585 return retval;
4586 }
4587
4588 static int
4589 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4590 struct bp_target_info *bp_tgt)
4591 {
4592 int retval;
4593
4594 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
4595
4596 fprintf_unfiltered (gdb_stdlog,
4597 "target_remove_breakpoint (%s, xxx) = %ld\n",
4598 core_addr_to_string (bp_tgt->placed_address),
4599 (unsigned long) retval);
4600 return retval;
4601 }
4602
4603 static int
4604 debug_to_can_use_hw_breakpoint (struct target_ops *self,
4605 int type, int cnt, int from_tty)
4606 {
4607 int retval;
4608
4609 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
4610 type, cnt, from_tty);
4611
4612 fprintf_unfiltered (gdb_stdlog,
4613 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4614 (unsigned long) type,
4615 (unsigned long) cnt,
4616 (unsigned long) from_tty,
4617 (unsigned long) retval);
4618 return retval;
4619 }
4620
4621 static int
4622 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4623 CORE_ADDR addr, int len)
4624 {
4625 CORE_ADDR retval;
4626
4627 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4628 addr, len);
4629
4630 fprintf_unfiltered (gdb_stdlog,
4631 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4632 core_addr_to_string (addr), (unsigned long) len,
4633 core_addr_to_string (retval));
4634 return retval;
4635 }
4636
4637 static int
4638 debug_to_can_accel_watchpoint_condition (struct target_ops *self,
4639 CORE_ADDR addr, int len, int rw,
4640 struct expression *cond)
4641 {
4642 int retval;
4643
4644 retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
4645 addr, len,
4646 rw, cond);
4647
4648 fprintf_unfiltered (gdb_stdlog,
4649 "target_can_accel_watchpoint_condition "
4650 "(%s, %d, %d, %s) = %ld\n",
4651 core_addr_to_string (addr), len, rw,
4652 host_address_to_string (cond), (unsigned long) retval);
4653 return retval;
4654 }
4655
4656 static int
4657 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4658 {
4659 int retval;
4660
4661 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4662
4663 fprintf_unfiltered (gdb_stdlog,
4664 "target_stopped_by_watchpoint () = %ld\n",
4665 (unsigned long) retval);
4666 return retval;
4667 }
4668
4669 static int
4670 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4671 {
4672 int retval;
4673
4674 retval = debug_target.to_stopped_data_address (target, addr);
4675
4676 fprintf_unfiltered (gdb_stdlog,
4677 "target_stopped_data_address ([%s]) = %ld\n",
4678 core_addr_to_string (*addr),
4679 (unsigned long)retval);
4680 return retval;
4681 }
4682
4683 static int
4684 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4685 CORE_ADDR addr,
4686 CORE_ADDR start, int length)
4687 {
4688 int retval;
4689
4690 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4691 start, length);
4692
4693 fprintf_filtered (gdb_stdlog,
4694 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4695 core_addr_to_string (addr), core_addr_to_string (start),
4696 length, retval);
4697 return retval;
4698 }
4699
4700 static int
4701 debug_to_insert_hw_breakpoint (struct target_ops *self,
4702 struct gdbarch *gdbarch,
4703 struct bp_target_info *bp_tgt)
4704 {
4705 int retval;
4706
4707 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4708 gdbarch, bp_tgt);
4709
4710 fprintf_unfiltered (gdb_stdlog,
4711 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4712 core_addr_to_string (bp_tgt->placed_address),
4713 (unsigned long) retval);
4714 return retval;
4715 }
4716
4717 static int
4718 debug_to_remove_hw_breakpoint (struct target_ops *self,
4719 struct gdbarch *gdbarch,
4720 struct bp_target_info *bp_tgt)
4721 {
4722 int retval;
4723
4724 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4725 gdbarch, bp_tgt);
4726
4727 fprintf_unfiltered (gdb_stdlog,
4728 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4729 core_addr_to_string (bp_tgt->placed_address),
4730 (unsigned long) retval);
4731 return retval;
4732 }
4733
4734 static int
4735 debug_to_insert_watchpoint (struct target_ops *self,
4736 CORE_ADDR addr, int len, int type,
4737 struct expression *cond)
4738 {
4739 int retval;
4740
4741 retval = debug_target.to_insert_watchpoint (&debug_target,
4742 addr, len, type, cond);
4743
4744 fprintf_unfiltered (gdb_stdlog,
4745 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4746 core_addr_to_string (addr), len, type,
4747 host_address_to_string (cond), (unsigned long) retval);
4748 return retval;
4749 }
4750
4751 static int
4752 debug_to_remove_watchpoint (struct target_ops *self,
4753 CORE_ADDR addr, int len, int type,
4754 struct expression *cond)
4755 {
4756 int retval;
4757
4758 retval = debug_target.to_remove_watchpoint (&debug_target,
4759 addr, len, type, cond);
4760
4761 fprintf_unfiltered (gdb_stdlog,
4762 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4763 core_addr_to_string (addr), len, type,
4764 host_address_to_string (cond), (unsigned long) retval);
4765 return retval;
4766 }
4767
4768 static void
4769 debug_to_terminal_init (struct target_ops *self)
4770 {
4771 debug_target.to_terminal_init (&debug_target);
4772
4773 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4774 }
4775
4776 static void
4777 debug_to_terminal_inferior (struct target_ops *self)
4778 {
4779 debug_target.to_terminal_inferior (&debug_target);
4780
4781 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4782 }
4783
4784 static void
4785 debug_to_terminal_ours_for_output (struct target_ops *self)
4786 {
4787 debug_target.to_terminal_ours_for_output (&debug_target);
4788
4789 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4790 }
4791
4792 static void
4793 debug_to_terminal_ours (struct target_ops *self)
4794 {
4795 debug_target.to_terminal_ours (&debug_target);
4796
4797 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4798 }
4799
4800 static void
4801 debug_to_terminal_save_ours (struct target_ops *self)
4802 {
4803 debug_target.to_terminal_save_ours (&debug_target);
4804
4805 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4806 }
4807
4808 static void
4809 debug_to_terminal_info (struct target_ops *self,
4810 const char *arg, int from_tty)
4811 {
4812 debug_target.to_terminal_info (&debug_target, arg, from_tty);
4813
4814 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4815 from_tty);
4816 }
4817
4818 static void
4819 debug_to_load (struct target_ops *self, char *args, int from_tty)
4820 {
4821 debug_target.to_load (&debug_target, args, from_tty);
4822
4823 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4824 }
4825
4826 static void
4827 debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
4828 {
4829 debug_target.to_post_startup_inferior (&debug_target, ptid);
4830
4831 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4832 ptid_get_pid (ptid));
4833 }
4834
4835 static int
4836 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4837 {
4838 int retval;
4839
4840 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4841
4842 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4843 pid, retval);
4844
4845 return retval;
4846 }
4847
4848 static int
4849 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4850 {
4851 int retval;
4852
4853 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4854
4855 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4856 pid, retval);
4857
4858 return retval;
4859 }
4860
4861 static int
4862 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4863 {
4864 int retval;
4865
4866 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4867
4868 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4869 pid, retval);
4870
4871 return retval;
4872 }
4873
4874 static int
4875 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4876 {
4877 int retval;
4878
4879 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4880
4881 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4882 pid, retval);
4883
4884 return retval;
4885 }
4886
4887 static int
4888 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4889 {
4890 int retval;
4891
4892 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4893
4894 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4895 pid, retval);
4896
4897 return retval;
4898 }
4899
4900 static int
4901 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4902 {
4903 int retval;
4904
4905 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4906
4907 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4908 pid, retval);
4909
4910 return retval;
4911 }
4912
4913 static int
4914 debug_to_has_exited (struct target_ops *self,
4915 int pid, int wait_status, int *exit_status)
4916 {
4917 int has_exited;
4918
4919 has_exited = debug_target.to_has_exited (&debug_target,
4920 pid, wait_status, exit_status);
4921
4922 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4923 pid, wait_status, *exit_status, has_exited);
4924
4925 return has_exited;
4926 }
4927
4928 static int
4929 debug_to_can_run (struct target_ops *self)
4930 {
4931 int retval;
4932
4933 retval = debug_target.to_can_run (&debug_target);
4934
4935 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4936
4937 return retval;
4938 }
4939
4940 static struct gdbarch *
4941 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4942 {
4943 struct gdbarch *retval;
4944
4945 retval = debug_target.to_thread_architecture (ops, ptid);
4946
4947 fprintf_unfiltered (gdb_stdlog,
4948 "target_thread_architecture (%s) = %s [%s]\n",
4949 target_pid_to_str (ptid),
4950 host_address_to_string (retval),
4951 gdbarch_bfd_arch_info (retval)->printable_name);
4952 return retval;
4953 }
4954
4955 static void
4956 debug_to_stop (struct target_ops *self, ptid_t ptid)
4957 {
4958 debug_target.to_stop (&debug_target, ptid);
4959
4960 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4961 target_pid_to_str (ptid));
4962 }
4963
4964 static void
4965 debug_to_rcmd (struct target_ops *self, char *command,
4966 struct ui_file *outbuf)
4967 {
4968 debug_target.to_rcmd (&debug_target, command, outbuf);
4969 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4970 }
4971
4972 static char *
4973 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4974 {
4975 char *exec_file;
4976
4977 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4978
4979 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4980 pid, exec_file);
4981
4982 return exec_file;
4983 }
4984
/* Install the debug_to_* logging wrappers.  Save a copy of the
   current target vector in DEBUG_TARGET, then point selected methods
   of CURRENT_TARGET at wrappers that forward to that saved copy and
   log each call to gdb_stdlog.  The memcpy must happen first, before
   any method slot is overwritten.  */

static void
setup_target_debug (void)
{
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
5032 \f
5033
/* Help text shared by the "info target" and "info files" commands.  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
5038
/* Default implementation of to_rcmd: this target has no monitor to
   talk to.  */

static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
5044
5045 static void
5046 do_monitor_command (char *cmd,
5047 int from_tty)
5048 {
5049 target_rcmd (cmd, gdb_stdtarg);
5050 }
5051
5052 /* Print the name of each layers of our target stack. */
5053
5054 static void
5055 maintenance_print_target_stack (char *cmd, int from_tty)
5056 {
5057 struct target_ops *t;
5058
5059 printf_filtered (_("The current target stack is:\n"));
5060
5061 for (t = target_stack; t != NULL; t = t->beneath)
5062 {
5063 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
5064 }
5065 }
5066
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated; the staging
   value is only copied over by set_target_async_command when it is
   safe to do so.  */
static int target_async_permitted_1 = 0;
5073
5074 static void
5075 set_target_async_command (char *args, int from_tty,
5076 struct cmd_list_element *c)
5077 {
5078 if (have_live_inferiors ())
5079 {
5080 target_async_permitted_1 = target_async_permitted;
5081 error (_("Cannot change this setting while the inferior is running."));
5082 }
5083
5084 target_async_permitted = target_async_permitted_1;
5085 }
5086
/* Implement "show target-async".  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"),
		    value);
}
5096
/* Temporary copies of permission settings.  The "set may-*" commands
   write here; the values are copied into the real may_* globals only
   when the inferior is not executing.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
5105
5106 /* Make the user-set values match the real values again. */
5107
5108 void
5109 update_target_permissions (void)
5110 {
5111 may_write_registers_1 = may_write_registers;
5112 may_write_memory_1 = may_write_memory;
5113 may_insert_breakpoints_1 = may_insert_breakpoints;
5114 may_insert_tracepoints_1 = may_insert_tracepoints;
5115 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
5116 may_stop_1 = may_stop;
5117 }
5118
/* The one function handles (most of) the permission flags in the same
   way.  Note that may_write_memory is deliberately NOT copied here —
   it is handled by set_write_memory_permission so that it can be
   changed independently of observer mode.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Roll the staging variables back so "show" stays truthful,
	 then refuse the change.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
5140
5141 /* Set memory write permission independently of observer mode. */
5142
5143 static void
5144 set_write_memory_permission (char *args, int from_tty,
5145 struct cmd_list_element *c)
5146 {
5147 /* Make the real values match the user-changed values. */
5148 may_write_memory = may_write_memory_1;
5149 update_observer_mode ();
5150 }
5151
5152
/* Module initialization: push the dummy target as the bottom of the
   target stack and register all target-related commands and "set/show"
   knobs (target debugging, readonly-section trust, the "monitor"
   command, the maintenance target-stack dump, async permission, and
   the may-* permission flags).  */

void
initialize_targets (void)
{
  init_dummy_target ();
  push_target (&dummy_target);

  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}