]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/target.c
53f3783339d771ac53e0b72607ff86aa76d70b48
[thirdparty/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
56
57 static void tcomplain (void) ATTRIBUTE_NORETURN;
58
59 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
60
61 static int return_zero (void);
62
63 static int return_one (void);
64
65 static int return_minus_one (void);
66
67 static void *return_null (void);
68
69 void target_ignore (void);
70
71 static void target_command (char *, int);
72
73 static struct target_ops *find_default_run_target (char *);
74
75 static target_xfer_partial_ftype default_xfer_partial;
76
77 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
78 ptid_t ptid);
79
80 static int find_default_can_async_p (struct target_ops *ignore);
81
82 static int find_default_is_async_p (struct target_ops *ignore);
83
84 #include "target-delegates.c"
85
86 static void init_dummy_target (void);
87
88 static struct target_ops debug_target;
89
90 static void debug_to_open (char *, int);
91
92 static void debug_to_prepare_to_store (struct target_ops *self,
93 struct regcache *);
94
95 static void debug_to_files_info (struct target_ops *);
96
97 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
98 struct bp_target_info *);
99
100 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
101 struct bp_target_info *);
102
103 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
104 int, int, int);
105
106 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
107 struct bp_target_info *);
108
109 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
110 struct bp_target_info *);
111
112 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
113 struct expression *);
114
115 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
116 struct expression *);
117
118 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
119
120 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
121 CORE_ADDR, CORE_ADDR, int);
122
123 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
124
125 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
126 struct expression *);
127
128 static void debug_to_terminal_init (void);
129
130 static void debug_to_terminal_inferior (void);
131
132 static void debug_to_terminal_ours_for_output (void);
133
134 static void debug_to_terminal_save_ours (void);
135
136 static void debug_to_terminal_ours (void);
137
138 static void debug_to_load (char *, int);
139
140 static int debug_to_can_run (void);
141
142 static void debug_to_stop (ptid_t);
143
144 /* Pointer to array of target architecture structures; the size of the
145 array; the current index into the array; the allocated size of the
146 array. */
147 struct target_ops **target_structs;
148 unsigned target_struct_size;
149 unsigned target_struct_allocsize;
150 #define DEFAULT_ALLOCSIZE 10
151
152 /* The initial current target, so that there is always a semi-valid
153 current target. */
154
155 static struct target_ops dummy_target;
156
157 /* Top of target stack. */
158
159 static struct target_ops *target_stack;
160
161 /* The target structure we are currently using to talk to a process
162 or file or whatever "inferior" we have. */
163
164 struct target_ops current_target;
165
166 /* Command list for target. */
167
168 static struct cmd_list_element *targetlist = NULL;
169
170 /* Nonzero if we should trust readonly sections from the
171 executable when reading memory. */
172
173 static int trust_readonly = 0;
174
175 /* Nonzero if we should show true memory content including
176 memory breakpoint inserted by gdb. */
177
178 static int show_memory_breakpoints = 0;
179
180 /* These globals control whether GDB attempts to perform these
181 operations; they are useful for targets that need to prevent
182 inadvertant disruption, such as in non-stop mode. */
183
184 int may_write_registers = 1;
185
186 int may_write_memory = 1;
187
188 int may_insert_breakpoints = 1;
189
190 int may_insert_tracepoints = 1;
191
192 int may_insert_fast_tracepoints = 1;
193
194 int may_stop = 1;
195
196 /* Non-zero if we want to see trace of target level stuff. */
197
198 static unsigned int targetdebug = 0;
/* Callback for "show debug target": report the current value of the
   "debug target" setting (VALUE is its string form) on FILE.  */
static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
205
206 static void setup_target_debug (void);
207
208 /* The user just typed 'target' without the name of a target. */
209
static void
target_command (char *arg, int from_tty)
{
  /* "target" alone is not a valid command; tell the user how to find
     the list of supported target types.  */
  fputs_filtered ("Argument required (target name).  Try `help target'\n",
		  gdb_stdout);
}
216
217 /* Default target_has_* methods for process_stratum targets. */
218
219 int
220 default_child_has_all_memory (struct target_ops *ops)
221 {
222 /* If no inferior selected, then we can't read memory here. */
223 if (ptid_equal (inferior_ptid, null_ptid))
224 return 0;
225
226 return 1;
227 }
228
229 int
230 default_child_has_memory (struct target_ops *ops)
231 {
232 /* If no inferior selected, then we can't read memory here. */
233 if (ptid_equal (inferior_ptid, null_ptid))
234 return 0;
235
236 return 1;
237 }
238
239 int
240 default_child_has_stack (struct target_ops *ops)
241 {
242 /* If no inferior selected, there's no stack. */
243 if (ptid_equal (inferior_ptid, null_ptid))
244 return 0;
245
246 return 1;
247 }
248
249 int
250 default_child_has_registers (struct target_ops *ops)
251 {
252 /* Can't read registers from no inferior. */
253 if (ptid_equal (inferior_ptid, null_ptid))
254 return 0;
255
256 return 1;
257 }
258
259 int
260 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
261 {
262 /* If there's no thread selected, then we can't make it run through
263 hoops. */
264 if (ptid_equal (the_ptid, null_ptid))
265 return 0;
266
267 return 1;
268 }
269
270
271 int
272 target_has_all_memory_1 (void)
273 {
274 struct target_ops *t;
275
276 for (t = current_target.beneath; t != NULL; t = t->beneath)
277 if (t->to_has_all_memory (t))
278 return 1;
279
280 return 0;
281 }
282
283 int
284 target_has_memory_1 (void)
285 {
286 struct target_ops *t;
287
288 for (t = current_target.beneath; t != NULL; t = t->beneath)
289 if (t->to_has_memory (t))
290 return 1;
291
292 return 0;
293 }
294
295 int
296 target_has_stack_1 (void)
297 {
298 struct target_ops *t;
299
300 for (t = current_target.beneath; t != NULL; t = t->beneath)
301 if (t->to_has_stack (t))
302 return 1;
303
304 return 0;
305 }
306
307 int
308 target_has_registers_1 (void)
309 {
310 struct target_ops *t;
311
312 for (t = current_target.beneath; t != NULL; t = t->beneath)
313 if (t->to_has_registers (t))
314 return 1;
315
316 return 0;
317 }
318
319 int
320 target_has_execution_1 (ptid_t the_ptid)
321 {
322 struct target_ops *t;
323
324 for (t = current_target.beneath; t != NULL; t = t->beneath)
325 if (t->to_has_execution (t, the_ptid))
326 return 1;
327
328 return 0;
329 }
330
/* Convenience wrapper: does the currently selected thread
   (inferior_ptid) have execution on any pushed target?  */
int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
336
337 /* Complete initialization of T. This ensures that various fields in
338 T are set, if needed by the target implementation. */
339
void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* NOTE(review): these casts install return_zero, declared as
     int (*) (void), through pointer types with different parameter
     lists; calling through a mismatched function-pointer type is
     technically undefined behavior in C, though it works on the ABIs
     GDB supports — consider per-signature stubs instead.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Fill every still-NULL method slot with a delegator that searches
     the target stack (generated in target-delegates.c).  */
  install_delegators (t);
}
364
365 /* Add possible target architecture T to the list and add a new
366 command 'target T->to_shortname'. Set COMPLETER as the command's
367 completer if not NULL. */
368
369 void
370 add_target_with_completer (struct target_ops *t,
371 completer_ftype *completer)
372 {
373 struct cmd_list_element *c;
374
375 complete_target_initialization (t);
376
377 if (!target_structs)
378 {
379 target_struct_allocsize = DEFAULT_ALLOCSIZE;
380 target_structs = (struct target_ops **) xmalloc
381 (target_struct_allocsize * sizeof (*target_structs));
382 }
383 if (target_struct_size >= target_struct_allocsize)
384 {
385 target_struct_allocsize *= 2;
386 target_structs = (struct target_ops **)
387 xrealloc ((char *) target_structs,
388 target_struct_allocsize * sizeof (*target_structs));
389 }
390 target_structs[target_struct_size++] = t;
391
392 if (targetlist == NULL)
393 add_prefix_cmd ("target", class_run, target_command, _("\
394 Connect to a target machine or process.\n\
395 The first argument is the type or protocol of the target machine.\n\
396 Remaining arguments are interpreted by the target protocol. For more\n\
397 information on the arguments for a particular protocol, type\n\
398 `help target ' followed by the protocol name."),
399 &targetlist, "target ", 0, &cmdlist);
400 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
401 &targetlist);
402 if (completer != NULL)
403 set_cmd_completer (c, completer);
404 }
405
406 /* Add a possible target architecture to the list. */
407
/* Register target T with no command-line completer.  */
void
add_target (struct target_ops *t)
{
  add_target_with_completer (t, NULL);
}
413
414 /* See target.h. */
415
void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  alt = xstrprintf ("target %s", t->to_shortname);
  /* ALT is not freed here; presumably deprecate_cmd keeps the pointer
     for the lifetime of the command — TODO confirm ownership.  */
  deprecate_cmd (c, alt);
}
428
429 /* Stub functions */
430
/* Deliberate no-op, used as a default for target methods that may
   safely do nothing.  */
void
target_ignore (void)
{
}
435
436 void
437 target_kill (void)
438 {
439 struct target_ops *t;
440
441 for (t = current_target.beneath; t != NULL; t = t->beneath)
442 if (t->to_kill != NULL)
443 {
444 if (targetdebug)
445 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
446
447 t->to_kill (t);
448 return;
449 }
450
451 noprocess ();
452 }
453
/* Load a program into the target (the "load" command).  Any cached
   target memory is stale after a load, so flush it first.  */
void
target_load (char *arg, int from_tty)
{
  target_dcache_invalidate ();
  (*current_target.to_load) (arg, from_tty);
}
460
461 void
462 target_create_inferior (char *exec_file, char *args,
463 char **env, int from_tty)
464 {
465 struct target_ops *t;
466
467 for (t = current_target.beneath; t != NULL; t = t->beneath)
468 {
469 if (t->to_create_inferior != NULL)
470 {
471 t->to_create_inferior (t, exec_file, args, env, from_tty);
472 if (targetdebug)
473 fprintf_unfiltered (gdb_stdlog,
474 "target_create_inferior (%s, %s, xxx, %d)\n",
475 exec_file, args, from_tty);
476 return;
477 }
478 }
479
480 internal_error (__FILE__, __LINE__,
481 _("could not find a target to create inferior"));
482 }
483
/* Give the inferior the terminal, unless this is a background resume
   in which case GDB keeps it.  */
void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet.  However, if sync_execution
     is not set, we know it will become async prior to resume.  */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */
  (*current_target.to_terminal_inferior) ();
}
498
/* Default deprecated_xfer_memory method: fail every transfer.
   MEMADDR/MYADDR/LEN/WRITE/T are ignored.  */
static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
506
/* Default stub for operations the current target cannot perform;
   raises an error naming the current target.  Does not return.  */
static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
513
/* Raise an error for operations that require a live process.  Does
   not return.  */
void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
519
/* Default to_terminal_info method: there is nothing to report.  */
static void
default_terminal_info (const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
525
526 /* A default implementation for the to_get_ada_task_ptid target method.
527
528 This function builds the PTID by using both LWP and TID as part of
529 the PTID lwp and tid elements. The pid used is the pid of the
530 inferior_ptid. */
531
static ptid_t
default_get_ada_task_ptid (long lwp, long tid)
{
  /* Combine the current inferior's pid with the given LWP and TID.  */
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
537
538 static enum exec_direction_kind
539 default_execution_direction (void)
540 {
541 if (!target_can_execute_reverse)
542 return EXEC_FORWARD;
543 else if (!target_can_async_p ())
544 return EXEC_FORWARD;
545 else
546 gdb_assert_not_reached ("\
547 to_execution_direction must be implemented for reverse async");
548 }
549
550 /* Go through the target stack from top to bottom, copying over zero
551 entries in current_target, then filling in still empty entries. In
552 effect, we are doing class inheritance through the pushed target
553 vectors.
554
555 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
556 is currently implemented, is that it discards any knowledge of
557 which target an inherited method originally belonged to.
558 Consequently, new new target methods should instead explicitly and
559 locally search the target stack for the target that can handle the
560 request. */
561
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* Copy each still-unset method slot from the topmost target that
     provides it.  Methods marked "Do not inherit" are handled by the
     delegators installed above (or by explicit stack searches) and
     must stay NULL here.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      INHERIT (to_post_attach, t);
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      INHERIT (to_prepare_to_store, t);
      INHERIT (deprecated_xfer_memory, t);
      INHERIT (to_files_info, t);
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      INHERIT (to_can_use_hw_breakpoint, t);
      INHERIT (to_insert_hw_breakpoint, t);
      INHERIT (to_remove_hw_breakpoint, t);
      /* Do not inherit to_ranged_break_num_registers.  */
      INHERIT (to_insert_watchpoint, t);
      INHERIT (to_remove_watchpoint, t);
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      INHERIT (to_watchpoint_addr_within_range, t);
      INHERIT (to_region_ok_for_hw_watchpoint, t);
      INHERIT (to_can_accel_watchpoint_condition, t);
      /* Do not inherit to_masked_watch_num_registers.  */
      INHERIT (to_terminal_init, t);
      INHERIT (to_terminal_inferior, t);
      INHERIT (to_terminal_ours_for_output, t);
      INHERIT (to_terminal_ours, t);
      INHERIT (to_terminal_save_ours, t);
      INHERIT (to_terminal_info, t);
      /* Do not inherit to_kill.  */
      INHERIT (to_load, t);
      /* Do not inherit to_create_inferior.  */
      INHERIT (to_post_startup_inferior, t);
      INHERIT (to_insert_fork_catchpoint, t);
      INHERIT (to_remove_fork_catchpoint, t);
      INHERIT (to_insert_vfork_catchpoint, t);
      INHERIT (to_remove_vfork_catchpoint, t);
      /* Do not inherit to_follow_fork.  */
      INHERIT (to_insert_exec_catchpoint, t);
      INHERIT (to_remove_exec_catchpoint, t);
      INHERIT (to_set_syscall_catchpoint, t);
      INHERIT (to_has_exited, t);
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      INHERIT (to_extra_thread_info, t);
      INHERIT (to_thread_name, t);
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      INHERIT (to_rcmd, t);
      INHERIT (to_pid_to_exec_file, t);
      INHERIT (to_log_command, t);
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      INHERIT (to_find_memory_regions, t);
      INHERIT (to_make_corefile_notes, t);
      INHERIT (to_get_bookmark, t);
      INHERIT (to_goto_bookmark, t);
      /* Do not inherit to_get_thread_local_address.  */
      INHERIT (to_can_execute_reverse, t);
      INHERIT (to_execution_direction, t);
      INHERIT (to_thread_architecture, t);
      /* Do not inherit to_read_description.  */
      INHERIT (to_get_ada_task_ptid, t);
      /* Do not inherit to_search_memory.  */
      INHERIT (to_supports_multi_process, t);
      INHERIT (to_supports_enable_disable_tracepoint, t);
      INHERIT (to_supports_string_tracing, t);
      INHERIT (to_trace_init, t);
      INHERIT (to_download_tracepoint, t);
      INHERIT (to_can_download_tracepoint, t);
      INHERIT (to_download_trace_state_variable, t);
      INHERIT (to_enable_tracepoint, t);
      INHERIT (to_disable_tracepoint, t);
      INHERIT (to_trace_set_readonly_regions, t);
      INHERIT (to_trace_start, t);
      INHERIT (to_get_trace_status, t);
      INHERIT (to_get_tracepoint_status, t);
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_get_min_fast_tracepoint_insn_len, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_set_trace_buffer_size, t);
      INHERIT (to_set_trace_notes, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_use_agent, t);
      INHERIT (to_can_use_agent, t);
      INHERIT (to_augmented_libraries_svr4_read, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      INHERIT (to_can_run_breakpoint_commands, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that print an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
    if (!current_target.field)               \
      current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_post_attach,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_prepare_to_store,
	    (void (*) (struct target_ops *, struct regcache *))
	    noprocess);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_files_info,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_can_use_hw_breakpoint,
	    (int (*) (struct target_ops *, int, int, int))
	    return_zero);
  de_fault (to_insert_hw_breakpoint,
	    (int (*) (struct gdbarch *, struct bp_target_info *))
	    return_minus_one);
  de_fault (to_remove_hw_breakpoint,
	    (int (*) (struct gdbarch *, struct bp_target_info *))
	    return_minus_one);
  de_fault (to_insert_watchpoint,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_minus_one);
  de_fault (to_remove_watchpoint,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_minus_one);
  de_fault (to_watchpoint_addr_within_range,
	    default_watchpoint_addr_within_range);
  de_fault (to_region_ok_for_hw_watchpoint,
	    default_region_ok_for_hw_watchpoint);
  de_fault (to_can_accel_watchpoint_condition,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_zero);
  de_fault (to_terminal_init,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_inferior,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_ours_for_output,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_ours,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_save_ours,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_info,
	    default_terminal_info);
  de_fault (to_load,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_post_startup_inferior,
	    (void (*) (ptid_t))
	    target_ignore);
  de_fault (to_insert_fork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_fork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_insert_vfork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_vfork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_insert_exec_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_exec_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_set_syscall_catchpoint,
	    (int (*) (int, int, int, int, int *))
	    return_one);
  de_fault (to_has_exited,
	    (int (*) (int, int, int *))
	    return_zero);
  de_fault (to_can_run,
	    return_zero);
  de_fault (to_extra_thread_info,
	    (char *(*) (struct thread_info *))
	    return_null);
  de_fault (to_thread_name,
	    (char *(*) (struct thread_info *))
	    return_null);
  de_fault (to_stop,
	    (void (*) (ptid_t))
	    target_ignore);
  de_fault (to_rcmd,
	    (void (*) (char *, struct ui_file *))
	    tcomplain);
  de_fault (to_pid_to_exec_file,
	    (char *(*) (int))
	    return_null);
  de_fault (to_thread_architecture,
	    default_thread_architecture);
  /* to_read_description is intentionally forced back to NULL: callers
     test for a NULL slot rather than a default stub.  */
  current_target.to_read_description = NULL;
  de_fault (to_get_ada_task_ptid,
	    (ptid_t (*) (long, long))
	    default_get_ada_task_ptid);
  de_fault (to_supports_multi_process,
	    (int (*) (void))
	    return_zero);
  de_fault (to_supports_enable_disable_tracepoint,
	    (int (*) (void))
	    return_zero);
  de_fault (to_supports_string_tracing,
	    (int (*) (void))
	    return_zero);
  de_fault (to_trace_init,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_download_tracepoint,
	    (void (*) (struct bp_location *))
	    tcomplain);
  de_fault (to_can_download_tracepoint,
	    (int (*) (void))
	    return_zero);
  de_fault (to_download_trace_state_variable,
	    (void (*) (struct trace_state_variable *))
	    tcomplain);
  de_fault (to_enable_tracepoint,
	    (void (*) (struct bp_location *))
	    tcomplain);
  de_fault (to_disable_tracepoint,
	    (void (*) (struct bp_location *))
	    tcomplain);
  de_fault (to_trace_set_readonly_regions,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_trace_start,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_get_trace_status,
	    (int (*) (struct trace_status *))
	    return_minus_one);
  de_fault (to_get_tracepoint_status,
	    (void (*) (struct breakpoint *, struct uploaded_tp *))
	    tcomplain);
  de_fault (to_trace_stop,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_trace_find,
	    (int (*) (enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_get_min_fast_tracepoint_insn_len,
	    (int (*) (void))
	    return_minus_one);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_set_trace_buffer_size,
	    (void (*) (LONGEST))
	    target_ignore);
  de_fault (to_set_trace_notes,
	    (int (*) (const char *, const char *, const char *))
	    return_zero);
  de_fault (to_get_tib_address,
	    (int (*) (ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (void))
	    return_null);
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
	    (int (*) (void))
	    return_zero);
  de_fault (to_can_run_breakpoint_commands,
	    (int (*) (void))
	    return_zero);
  de_fault (to_use_agent,
	    (int (*) (int))
	    tcomplain);
  de_fault (to_can_use_agent,
	    (int (*) (void))
	    return_zero);
  de_fault (to_augmented_libraries_svr4_read,
	    (int (*) (void))
	    return_zero);
  de_fault (to_execution_direction, default_execution_direction);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
946
947 /* Push a new target type into the stack of the existing target accessors,
948 possibly superseding some of the existing accessors.
949
950 Rather than allow an empty stack, we always have the dummy target at
951 the bottom stratum, so we can call the function vectors without
952 checking them. */
953
void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  The stack is
     kept sorted by stratum, highest stratum on top.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
         and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      /* Unlink before closing, so the close implementation does not
	 see a half-linked stack.  */
      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  /* Re-squash the stack into current_target.  */
  update_current_target ();
}
997
998 /* Remove a target_ops vector from the stack, wherever it may be.
999 Return how many times it was removed (0 or 1). */
1000
int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target always sits at the bottom of the stack and must
     never be removed.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}
1039
/* Unpush every target whose stratum is strictly above ABOVE_STRATUM,
   starting from the top of the stack.  */
void
pop_all_targets_above (enum strata above_stratum)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      if (!unpush_target (target_stack))
	{
	  /* unpush_target only fails if the top of the stack is not
	     actually on the stack — an internal inconsistency.  */
	  fprintf_unfiltered (gdb_stderr,
			      "pop_all_targets couldn't find target %s\n",
			      target_stack->to_shortname);
	  internal_error (__FILE__, __LINE__,
			  _("failed internal consistency check"));
	  break;
	}
    }
}
1056
/* Unpush every target except the bottom dummy target.  */
void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
1062
1063 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1064
1065 int
1066 target_is_pushed (struct target_ops *t)
1067 {
1068 struct target_ops **cur;
1069
1070 /* Check magic number. If wrong, it probably means someone changed
1071 the struct definition, but not all the places that initialize one. */
1072 if (t->to_magic != OPS_MAGIC)
1073 {
1074 fprintf_unfiltered (gdb_stderr,
1075 "Magic number of %s target struct wrong\n",
1076 t->to_shortname);
1077 internal_error (__FILE__, __LINE__,
1078 _("failed internal consistency check"));
1079 }
1080
1081 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1082 if (*cur == t)
1083 return 1;
1084
1085 return 0;
1086 }
1087
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.

   Requires both a target on the stack implementing
   to_get_thread_local_address and a gdbarch that can fetch the TLS
   load-module address; otherwise raises a user error.  TLS-specific
   failures are translated into user-friendly error messages here;
   any other exception is re-thrown.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* Volatile: must survive TRY_CATCH's longjmp-based unwinding.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first (topmost) target that can resolve TLS
     addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
	 throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      /* Not a TLS-specific error; let a higher catcher deal
		 with it.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1184
1185 const char *
1186 target_xfer_status_to_string (enum target_xfer_status err)
1187 {
1188 #define CASE(X) case X: return #X
1189 switch (err)
1190 {
1191 CASE(TARGET_XFER_E_IO);
1192 CASE(TARGET_XFER_E_UNAVAILABLE);
1193 default:
1194 return "<unknown>";
1195 }
1196 #undef CASE
1197 };
1198
1199
#undef MIN
#define MIN(A, B) (((A) <= (B)) ? (A) : (B))

/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read in aligned 4-byte words; OFFSET is where MEMADDR falls
	 within the word, TLEN how many of its bytes we consume.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the output buffer (doubling) when this chunk would not
	 fit.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes into the result, stopping just past a NUL
	 terminator (the NUL itself is stored and counted).  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
 done:
  /* Ownership of BUFFER passes to the caller even on error, so any
     partially-read data remains accessible.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1275
1276 struct target_section_table *
1277 target_get_section_table (struct target_ops *target)
1278 {
1279 struct target_ops *t;
1280
1281 if (targetdebug)
1282 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1283
1284 for (t = target; t != NULL; t = t->beneath)
1285 if (t->to_get_section_table != NULL)
1286 return (*t->to_get_section_table) (t);
1287
1288 return NULL;
1289 }
1290
1291 /* Find a section containing ADDR. */
1292
1293 struct target_section *
1294 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1295 {
1296 struct target_section_table *table = target_get_section_table (target);
1297 struct target_section *secp;
1298
1299 if (table == NULL)
1300 return NULL;
1301
1302 for (secp = table->sections; secp < table->sections_end; secp++)
1303 {
1304 if (addr >= secp->addr && addr < secp->endaddr)
1305 return secp;
1306 }
1307 return NULL;
1308 }
1309
1310 /* Read memory from the live target, even if currently inspecting a
1311 traceframe. The return is the same as that of target_read. */
1312
1313 static enum target_xfer_status
1314 target_read_live_memory (enum target_object object,
1315 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1316 ULONGEST *xfered_len)
1317 {
1318 enum target_xfer_status ret;
1319 struct cleanup *cleanup;
1320
1321 /* Switch momentarily out of tfind mode so to access live memory.
1322 Note that this must not clear global state, such as the frame
1323 cache, which must still remain valid for the previous traceframe.
1324 We may be _building_ the frame cache at this point. */
1325 cleanup = make_cleanup_restore_traceframe_number ();
1326 set_traceframe_number (-1);
1327
1328 ret = target_xfer_partial (current_target.beneath, object, NULL,
1329 myaddr, NULL, memaddr, len, xfered_len);
1330
1331 do_cleanups (cleanup);
1332 return ret;
1333 }
1334
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed if MEMADDR itself lands in a SEC_READONLY
     section.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      /* Find the section that covers the start of the request and
	 clip the read to it.  */
      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  /* Not in a read-only section, or no covering section found.  */
  return TARGET_XFER_EOF;
}
1390
1391 /* Read memory from more than one valid target. A core file, for
1392 instance, could have some of memory but delegate other bits to
1393 the target below it. So, we must manually try all targets. */
1394
1395 static enum target_xfer_status
1396 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1397 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1398 ULONGEST *xfered_len)
1399 {
1400 enum target_xfer_status res;
1401
1402 do
1403 {
1404 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1405 readbuf, writebuf, memaddr, len,
1406 xfered_len);
1407 if (res == TARGET_XFER_OK)
1408 break;
1409
1410 /* Stop if the target reports that the memory is not available. */
1411 if (res == TARGET_XFER_E_UNAVAILABLE)
1412 break;
1413
1414 /* We want to continue past core files to executables, but not
1415 past a running target's memory. */
1416 if (ops->to_has_all_memory (ops))
1417 break;
1418
1419 ops = ops->beneath;
1420 }
1421 while (ops != NULL);
1422
1423 return res;
1424 }
1425
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   Tries, in order: unmapped overlay sections, trusted read-only
   sections, traceframe-aware live read-only fallback, the dcache,
   and finally the raw target stack.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  /* NOTE(review): reg_len is int while LEN is ULONGEST; a request
     larger than INT_MAX could truncate here — confirm upstream.  */
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  /* Clip LEN to just before the first available
		     range.  */
		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1636
1637 /* Perform a partial memory transfer. For docs see target.h,
1638 to_xfer_partial. */
1639
1640 static enum target_xfer_status
1641 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1642 gdb_byte *readbuf, const gdb_byte *writebuf,
1643 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1644 {
1645 enum target_xfer_status res;
1646
1647 /* Zero length requests are ok and require no work. */
1648 if (len == 0)
1649 return TARGET_XFER_EOF;
1650
1651 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1652 breakpoint insns, thus hiding out from higher layers whether
1653 there are software breakpoints inserted in the code stream. */
1654 if (readbuf != NULL)
1655 {
1656 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1657 xfered_len);
1658
1659 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1660 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1661 }
1662 else
1663 {
1664 void *buf;
1665 struct cleanup *old_chain;
1666
1667 /* A large write request is likely to be partially satisfied
1668 by memory_xfer_partial_1. We will continually malloc
1669 and free a copy of the entire write request for breakpoint
1670 shadow handling even though we only end up writing a small
1671 subset of it. Cap writes to 4KB to mitigate this. */
1672 len = min (4096, len);
1673
1674 buf = xmalloc (len);
1675 old_chain = make_cleanup (xfree, buf);
1676 memcpy (buf, writebuf, len);
1677
1678 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1679 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1680 xfered_len);
1681
1682 do_cleanups (old_chain);
1683 }
1684
1685 return res;
1686 }
1687
/* Cleanup callback: restore show_memory_breakpoints to the value
   packed into ARG by make_show_memory_breakpoints_cleanup.  */

static void
restore_show_memory_breakpoints (void *arg)
{
  show_memory_breakpoints = (uintptr_t) arg;
}
1693
1694 struct cleanup *
1695 make_show_memory_breakpoints_cleanup (int show)
1696 {
1697 int current = show_memory_breakpoints;
1698
1699 show_memory_breakpoints = show;
1700 return make_cleanup (restore_show_memory_breakpoints,
1701 (void *) (uintptr_t) current);
1702 }
1703
/* For docs see target.h, to_xfer_partial.

   Top-level dispatch: routes memory objects through the memory
   machinery, raw memory straight down the stack, and everything else
   to OPS's own method; optionally dumps the transfer when
   targetdebug is on.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honor the "may-write-memory" setting before touching the
     target.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      /* Break the hex dump at 16-byte-aligned addresses; with
		 targetdebug < 2, elide everything past the first
		 line.  */
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1796
1797 /* Read LEN bytes of target memory at address MEMADDR, placing the
1798 results in GDB's memory at MYADDR. Returns either 0 for success or
1799 TARGET_XFER_E_IO if any error occurs.
1800
1801 If an error occurs, no guarantee is made about the contents of the data at
1802 MYADDR. In particular, the caller should not depend upon partial reads
1803 filling the buffer with good data. There is no way for the caller to know
1804 how much good data might have been transfered anyway. Callers that can
1805 deal with partial reads should call target_read (which will retry until
1806 it makes no progress, and then return how much was transferred). */
1807
1808 int
1809 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1810 {
1811 /* Dispatch to the topmost target, not the flattened current_target.
1812 Memory accesses check target->to_has_(all_)memory, and the
1813 flattened target doesn't inherit those. */
1814 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1815 myaddr, memaddr, len) == len)
1816 return 0;
1817 else
1818 return TARGET_XFER_E_IO;
1819 }
1820
1821 /* Like target_read_memory, but specify explicitly that this is a read
1822 from the target's raw memory. That is, this read bypasses the
1823 dcache, breakpoint shadowing, etc. */
1824
1825 int
1826 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1827 {
1828 /* See comment in target_read_memory about why the request starts at
1829 current_target.beneath. */
1830 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1831 myaddr, memaddr, len) == len)
1832 return 0;
1833 else
1834 return TARGET_XFER_E_IO;
1835 }
1836
1837 /* Like target_read_memory, but specify explicitly that this is a read from
1838 the target's stack. This may trigger different cache behavior. */
1839
1840 int
1841 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1842 {
1843 /* See comment in target_read_memory about why the request starts at
1844 current_target.beneath. */
1845 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1846 myaddr, memaddr, len) == len)
1847 return 0;
1848 else
1849 return TARGET_XFER_E_IO;
1850 }
1851
1852 /* Like target_read_memory, but specify explicitly that this is a read from
1853 the target's code. This may trigger different cache behavior. */
1854
1855 int
1856 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1857 {
1858 /* See comment in target_read_memory about why the request starts at
1859 current_target.beneath. */
1860 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1861 myaddr, memaddr, len) == len)
1862 return 0;
1863 else
1864 return TARGET_XFER_E_IO;
1865 }
1866
1867 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1868 Returns either 0 for success or TARGET_XFER_E_IO if any
1869 error occurs. If an error occurs, no guarantee is made about how
1870 much data got written. Callers that can deal with partial writes
1871 should call target_write. */
1872
1873 int
1874 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1875 {
1876 /* See comment in target_read_memory about why the request starts at
1877 current_target.beneath. */
1878 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1879 myaddr, memaddr, len) == len)
1880 return 0;
1881 else
1882 return TARGET_XFER_E_IO;
1883 }
1884
1885 /* Write LEN bytes from MYADDR to target raw memory at address
1886 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1887 if any error occurs. If an error occurs, no guarantee is made
1888 about how much data got written. Callers that can deal with
1889 partial writes should call target_write. */
1890
1891 int
1892 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1893 {
1894 /* See comment in target_read_memory about why the request starts at
1895 current_target.beneath. */
1896 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1897 myaddr, memaddr, len) == len)
1898 return 0;
1899 else
1900 return TARGET_XFER_E_IO;
1901 }
1902
1903 /* Fetch the target's memory map. */
1904
1905 VEC(mem_region_s) *
1906 target_memory_map (void)
1907 {
1908 VEC(mem_region_s) *result;
1909 struct mem_region *last_one, *this_one;
1910 int ix;
1911 struct target_ops *t;
1912
1913 if (targetdebug)
1914 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1915
1916 for (t = current_target.beneath; t != NULL; t = t->beneath)
1917 if (t->to_memory_map != NULL)
1918 break;
1919
1920 if (t == NULL)
1921 return NULL;
1922
1923 result = t->to_memory_map (t);
1924 if (result == NULL)
1925 return NULL;
1926
1927 qsort (VEC_address (mem_region_s, result),
1928 VEC_length (mem_region_s, result),
1929 sizeof (struct mem_region), mem_region_cmp);
1930
1931 /* Check that regions do not overlap. Simultaneously assign
1932 a numbering for the "mem" commands to use to refer to
1933 each region. */
1934 last_one = NULL;
1935 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1936 {
1937 this_one->number = ix;
1938
1939 if (last_one && last_one->hi > this_one->lo)
1940 {
1941 warning (_("Overlapping regions in memory map: ignoring"));
1942 VEC_free (mem_region_s, result);
1943 return NULL;
1944 }
1945 last_one = this_one;
1946 }
1947
1948 return result;
1949 }
1950
1951 void
1952 target_flash_erase (ULONGEST address, LONGEST length)
1953 {
1954 struct target_ops *t;
1955
1956 for (t = current_target.beneath; t != NULL; t = t->beneath)
1957 if (t->to_flash_erase != NULL)
1958 {
1959 if (targetdebug)
1960 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1961 hex_string (address), phex (length, 0));
1962 t->to_flash_erase (t, address, length);
1963 return;
1964 }
1965
1966 tcomplain ();
1967 }
1968
1969 void
1970 target_flash_done (void)
1971 {
1972 struct target_ops *t;
1973
1974 for (t = current_target.beneath; t != NULL; t = t->beneath)
1975 if (t->to_flash_done != NULL)
1976 {
1977 if (targetdebug)
1978 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1979 t->to_flash_done (t);
1980 return;
1981 }
1982
1983 tcomplain ();
1984 }
1985
/* "show trust-readonly-sections" command callback: print the current
   setting VALUE to FILE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1994
1995 /* More generic transfers. */
1996
1997 static enum target_xfer_status
1998 default_xfer_partial (struct target_ops *ops, enum target_object object,
1999 const char *annex, gdb_byte *readbuf,
2000 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
2001 ULONGEST *xfered_len)
2002 {
2003 if (object == TARGET_OBJECT_MEMORY
2004 && ops->deprecated_xfer_memory != NULL)
2005 /* If available, fall back to the target's
2006 "deprecated_xfer_memory" method. */
2007 {
2008 int xfered = -1;
2009
2010 errno = 0;
2011 if (writebuf != NULL)
2012 {
2013 void *buffer = xmalloc (len);
2014 struct cleanup *cleanup = make_cleanup (xfree, buffer);
2015
2016 memcpy (buffer, writebuf, len);
2017 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
2018 1/*write*/, NULL, ops);
2019 do_cleanups (cleanup);
2020 }
2021 if (readbuf != NULL)
2022 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
2023 0/*read*/, NULL, ops);
2024 if (xfered > 0)
2025 {
2026 *xfered_len = (ULONGEST) xfered;
2027 return TARGET_XFER_E_IO;
2028 }
2029 else if (xfered == 0 && errno == 0)
2030 /* "deprecated_xfer_memory" uses 0, cross checked against
2031 ERRNO as one indication of an error. */
2032 return TARGET_XFER_EOF;
2033 else
2034 return TARGET_XFER_E_IO;
2035 }
2036 else
2037 {
2038 gdb_assert (ops->beneath != NULL);
2039 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2040 readbuf, writebuf, offset, len,
2041 xfered_len);
2042 }
2043 }
2044
/* Target vector read/write partial wrapper functions.  */

/* A partial read: target_xfer_partial with only the read buffer
   supplied.  */

static enum target_xfer_status
target_read_partial (struct target_ops *ops,
		     enum target_object object,
		     const char *annex, gdb_byte *buf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
			      xfered_len);
}
2057
/* A partial write: target_xfer_partial with only the write buffer
   supplied.  */

static enum target_xfer_status
target_write_partial (struct target_ops *ops,
		      enum target_object object,
		      const char *annex, const gdb_byte *buf,
		      ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
			      xfered_len);
}
2067
2068 /* Wrappers to perform the full transfer. */
2069
2070 /* For docs on target_read see target.h. */
2071
2072 LONGEST
2073 target_read (struct target_ops *ops,
2074 enum target_object object,
2075 const char *annex, gdb_byte *buf,
2076 ULONGEST offset, LONGEST len)
2077 {
2078 LONGEST xfered = 0;
2079
2080 while (xfered < len)
2081 {
2082 ULONGEST xfered_len;
2083 enum target_xfer_status status;
2084
2085 status = target_read_partial (ops, object, annex,
2086 (gdb_byte *) buf + xfered,
2087 offset + xfered, len - xfered,
2088 &xfered_len);
2089
2090 /* Call an observer, notifying them of the xfer progress? */
2091 if (status == TARGET_XFER_EOF)
2092 return xfered;
2093 else if (status == TARGET_XFER_OK)
2094 {
2095 xfered += xfered_len;
2096 QUIT;
2097 }
2098 else
2099 return -1;
2100
2101 }
2102 return len;
2103 }
2104
2105 /* Assuming that the entire [begin, end) range of memory cannot be
2106 read, try to read whatever subrange is possible to read.
2107
2108 The function returns, in RESULT, either zero or one memory block.
2109 If there's a readable subrange at the beginning, it is completely
2110 read and returned. Any further readable subrange will not be read.
2111 Otherwise, if there's a readable subrange at the end, it will be
2112 completely read and returned. Any readable subranges before it
2113 (obviously, not starting at the beginning), will be ignored. In
2114 other cases -- either no readable subrange, or readable subrange(s)
2115 that is neither at the beginning, or end, nothing is returned.
2116
2117 The purpose of this function is to handle a read across a boundary
2118 of accessible memory in a case when memory map is not available.
2119 The above restrictions are fine for this case, but will give
2120 incorrect results if the memory is 'patchy'. However, supporting
2121 'patchy' memory would require trying to read every single byte,
2122 and it seems unacceptable solution. Explicit memory map is
2123 recommended for this case -- and target_read_memory_robust will
2124 take care of reading multiple ranges then. */
2125
static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  /* Scratch buffer large enough for the whole range; ownership may be
     transferred to *RESULT (forward case) or freed here.  */
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  Which edge succeeded decides
     the direction of the bisection below.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* "First half" is the half adjacent to the readable edge; the
	 halves swap roles depending on the search direction.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the next
	     iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  BUF ownership
	 transfers to the result vector.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy just that
	 tail into a right-sized buffer and free the scratch one.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2239
2240 void
2241 free_memory_read_result_vector (void *x)
2242 {
2243 VEC(memory_read_result_s) *v = x;
2244 memory_read_result_s *current;
2245 int ix;
2246
2247 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2248 {
2249 xfree (current->data);
2250 }
2251 VEC_free (memory_read_result_s, v);
2252 }
2253
2254 VEC(memory_read_result_s) *
2255 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2256 {
2257 VEC(memory_read_result_s) *result = 0;
2258
2259 LONGEST xfered = 0;
2260 while (xfered < len)
2261 {
2262 struct mem_region *region = lookup_mem_region (offset + xfered);
2263 LONGEST rlen;
2264
2265 /* If there is no explicit region, a fake one should be created. */
2266 gdb_assert (region);
2267
2268 if (region->hi == 0)
2269 rlen = len - xfered;
2270 else
2271 rlen = region->hi - offset;
2272
2273 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2274 {
2275 /* Cannot read this region. Note that we can end up here only
2276 if the region is explicitly marked inaccessible, or
2277 'inaccessible-by-default' is in effect. */
2278 xfered += rlen;
2279 }
2280 else
2281 {
2282 LONGEST to_read = min (len - xfered, rlen);
2283 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2284
2285 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2286 (gdb_byte *) buffer,
2287 offset + xfered, to_read);
2288 /* Call an observer, notifying them of the xfer progress? */
2289 if (xfer <= 0)
2290 {
2291 /* Got an error reading full chunk. See if maybe we can read
2292 some subrange. */
2293 xfree (buffer);
2294 read_whatever_is_readable (ops, offset + xfered,
2295 offset + xfered + to_read, &result);
2296 xfered += to_read;
2297 }
2298 else
2299 {
2300 struct memory_read_result r;
2301 r.data = buffer;
2302 r.begin = offset + xfered;
2303 r.end = r.begin + xfer;
2304 VEC_safe_push (memory_read_result_s, result, &r);
2305 xfered += xfer;
2306 }
2307 QUIT;
2308 }
2309 }
2310 return result;
2311 }
2312
2313
2314 /* An alternative to target_write with progress callbacks. */
2315
2316 LONGEST
2317 target_write_with_progress (struct target_ops *ops,
2318 enum target_object object,
2319 const char *annex, const gdb_byte *buf,
2320 ULONGEST offset, LONGEST len,
2321 void (*progress) (ULONGEST, void *), void *baton)
2322 {
2323 LONGEST xfered = 0;
2324
2325 /* Give the progress callback a chance to set up. */
2326 if (progress)
2327 (*progress) (0, baton);
2328
2329 while (xfered < len)
2330 {
2331 ULONGEST xfered_len;
2332 enum target_xfer_status status;
2333
2334 status = target_write_partial (ops, object, annex,
2335 (gdb_byte *) buf + xfered,
2336 offset + xfered, len - xfered,
2337 &xfered_len);
2338
2339 if (status == TARGET_XFER_EOF)
2340 return xfered;
2341 if (TARGET_XFER_STATUS_ERROR_P (status))
2342 return -1;
2343
2344 gdb_assert (status == TARGET_XFER_OK);
2345 if (progress)
2346 (*progress) (xfered_len, baton);
2347
2348 xfered += xfered_len;
2349 QUIT;
2350 }
2351 return len;
2352 }
2353
2354 /* For docs on target_write see target.h. */
2355
LONGEST
target_write (struct target_ops *ops,
	      enum target_object object,
	      const char *annex, const gdb_byte *buf,
	      ULONGEST offset, LONGEST len)
{
  /* Same as target_write_with_progress, just with no progress
     callback.  */
  return target_write_with_progress (ops, object, annex, buf, offset, len,
				     NULL, NULL);
}
2365
2366 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2367 the size of the transferred data. PADDING additional bytes are
2368 available in *BUF_P. This is a helper function for
2369 target_read_alloc; see the declaration of that function for more
2370 information. */
2371
2372 static LONGEST
2373 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2374 const char *annex, gdb_byte **buf_p, int padding)
2375 {
2376 size_t buf_alloc, buf_pos;
2377 gdb_byte *buf;
2378
2379 /* This function does not have a length parameter; it reads the
2380 entire OBJECT). Also, it doesn't support objects fetched partly
2381 from one target and partly from another (in a different stratum,
2382 e.g. a core file and an executable). Both reasons make it
2383 unsuitable for reading memory. */
2384 gdb_assert (object != TARGET_OBJECT_MEMORY);
2385
2386 /* Start by reading up to 4K at a time. The target will throttle
2387 this number down if necessary. */
2388 buf_alloc = 4096;
2389 buf = xmalloc (buf_alloc);
2390 buf_pos = 0;
2391 while (1)
2392 {
2393 ULONGEST xfered_len;
2394 enum target_xfer_status status;
2395
2396 status = target_read_partial (ops, object, annex, &buf[buf_pos],
2397 buf_pos, buf_alloc - buf_pos - padding,
2398 &xfered_len);
2399
2400 if (status == TARGET_XFER_EOF)
2401 {
2402 /* Read all there was. */
2403 if (buf_pos == 0)
2404 xfree (buf);
2405 else
2406 *buf_p = buf;
2407 return buf_pos;
2408 }
2409 else if (status != TARGET_XFER_OK)
2410 {
2411 /* An error occurred. */
2412 xfree (buf);
2413 return TARGET_XFER_E_IO;
2414 }
2415
2416 buf_pos += xfered_len;
2417
2418 /* If the buffer is filling up, expand it. */
2419 if (buf_alloc < buf_pos * 2)
2420 {
2421 buf_alloc *= 2;
2422 buf = xrealloc (buf, buf_alloc);
2423 }
2424
2425 QUIT;
2426 }
2427 }
2428
2429 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2430 the size of the transferred data. See the declaration in "target.h"
2431 function for more information about the return value. */
2432
LONGEST
target_read_alloc (struct target_ops *ops, enum target_object object,
		   const char *annex, gdb_byte **buf_p)
{
  /* Zero padding: callers treat the result as raw bytes, not a
     NUL-terminated string (cf. target_read_stralloc, which pads).  */
  return target_read_alloc_1 (ops, object, annex, buf_p, 0);
}
2439
2440 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2441 returned as a string, allocated using xmalloc. If an error occurs
2442 or the transfer is unsupported, NULL is returned. Empty objects
2443 are returned as allocated but empty strings. A warning is issued
2444 if the result contains any embedded NUL bytes. */
2445
2446 char *
2447 target_read_stralloc (struct target_ops *ops, enum target_object object,
2448 const char *annex)
2449 {
2450 gdb_byte *buffer;
2451 char *bufstr;
2452 LONGEST i, transferred;
2453
2454 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2455 bufstr = (char *) buffer;
2456
2457 if (transferred < 0)
2458 return NULL;
2459
2460 if (transferred == 0)
2461 return xstrdup ("");
2462
2463 bufstr[transferred] = 0;
2464
2465 /* Check for embedded NUL bytes; but allow trailing NULs. */
2466 for (i = strlen (bufstr); i < transferred; i++)
2467 if (bufstr[i] != 0)
2468 {
2469 warning (_("target object %d, annex %s, "
2470 "contained unexpected null characters"),
2471 (int) object, annex ? annex : "(none)");
2472 break;
2473 }
2474
2475 return bufstr;
2476 }
2477
2478 /* Memory transfer methods. */
2479
2480 void
2481 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2482 LONGEST len)
2483 {
2484 /* This method is used to read from an alternate, non-current
2485 target. This read must bypass the overlay support (as symbols
2486 don't match this target), and GDB's internal cache (wrong cache
2487 for this target). */
2488 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2489 != len)
2490 memory_error (TARGET_XFER_E_IO, addr);
2491 }
2492
/* Read a LEN-byte unsigned integer from ADDR via OPS (raw memory,
   bypassing caches and overlays) and return it in host form,
   interpreting the bytes per BYTE_ORDER.  LEN must not exceed
   sizeof (ULONGEST).  */

ULONGEST
get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
			    int len, enum bfd_endian byte_order)
{
  gdb_byte buf[sizeof (ULONGEST)];

  /* The scratch buffer must be able to hold the widest value.  */
  gdb_assert (len <= sizeof (buf));
  get_target_memory (ops, addr, buf, len);
  return extract_unsigned_integer (buf, len, byte_order);
}
2503
2504 /* See target.h. */
2505
2506 int
2507 target_insert_breakpoint (struct gdbarch *gdbarch,
2508 struct bp_target_info *bp_tgt)
2509 {
2510 if (!may_insert_breakpoints)
2511 {
2512 warning (_("May not insert breakpoints"));
2513 return 1;
2514 }
2515
2516 return current_target.to_insert_breakpoint (&current_target,
2517 gdbarch, bp_tgt);
2518 }
2519
2520 /* See target.h. */
2521
2522 int
2523 target_remove_breakpoint (struct gdbarch *gdbarch,
2524 struct bp_target_info *bp_tgt)
2525 {
2526 /* This is kind of a weird case to handle, but the permission might
2527 have been changed after breakpoints were inserted - in which case
2528 we should just take the user literally and assume that any
2529 breakpoints should be left in place. */
2530 if (!may_insert_breakpoints)
2531 {
2532 warning (_("May not remove breakpoints"));
2533 return 1;
2534 }
2535
2536 return current_target.to_remove_breakpoint (&current_target,
2537 gdbarch, bp_tgt);
2538 }
2539
2540 static void
2541 target_info (char *args, int from_tty)
2542 {
2543 struct target_ops *t;
2544 int has_all_mem = 0;
2545
2546 if (symfile_objfile != NULL)
2547 printf_unfiltered (_("Symbols from \"%s\".\n"),
2548 objfile_name (symfile_objfile));
2549
2550 for (t = target_stack; t != NULL; t = t->beneath)
2551 {
2552 if (!(*t->to_has_memory) (t))
2553 continue;
2554
2555 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2556 continue;
2557 if (has_all_mem)
2558 printf_unfiltered (_("\tWhile running this, "
2559 "GDB does not access memory from...\n"));
2560 printf_unfiltered ("%s:\n", t->to_longname);
2561 (t->to_files_info) (t);
2562 has_all_mem = (*t->to_has_all_memory) (t);
2563 }
2564 }
2565
2566 /* This function is called before any new inferior is created, e.g.
2567 by running a program, attaching, or connecting to a target.
2568 It cleans up any state from previous invocations which might
2569 change between runs. This is a subset of what target_preopen
2570 resets (things which might change between targets). */
2571
void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  Only discard per-inferior state
     when it really is per-inferior.  */
  if (!gdbarch_has_global_solist (target_gdbarch ()))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* Tracepoint agent capabilities must be re-probed for the new
     inferior.  */
  agent_capability_invalidate ();
}
2606
2607 /* Callback for iterate_over_inferiors. Gets rid of the given
2608 inferior. */
2609
2610 static int
2611 dispose_inferior (struct inferior *inf, void *args)
2612 {
2613 struct thread_info *thread;
2614
2615 thread = any_thread_of_process (inf->pid);
2616 if (thread)
2617 {
2618 switch_to_thread (thread->ptid);
2619
2620 /* Core inferiors actually should be detached, not killed. */
2621 if (target_has_execution)
2622 target_kill ();
2623 else
2624 target_detach (NULL, 0);
2625 }
2626
2627 return 0;
2628 }
2629
2630 /* This is to be called by the open routine before it does
2631 anything. */
2632
2633 void
2634 target_preopen (int from_tty)
2635 {
2636 dont_repeat ();
2637
2638 if (have_inferiors ())
2639 {
2640 if (!from_tty
2641 || !have_live_inferiors ()
2642 || query (_("A program is being debugged already. Kill it? ")))
2643 iterate_over_inferiors (dispose_inferior, NULL);
2644 else
2645 error (_("Program not killed."));
2646 }
2647
2648 /* Calling target_kill may remove the target from the stack. But if
2649 it doesn't (which seems like a win for UDI), remove it now. */
2650 /* Leave the exec target, though. The user may be switching from a
2651 live process to a core of the same program. */
2652 pop_all_targets_above (file_stratum);
2653
2654 target_pre_inferior (from_tty);
2655 }
2656
2657 /* Detach a target after doing deferred register stores. */
2658
2659 void
2660 target_detach (const char *args, int from_tty)
2661 {
2662 struct target_ops* t;
2663
2664 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2665 /* Don't remove global breakpoints here. They're removed on
2666 disconnection from the target. */
2667 ;
2668 else
2669 /* If we're in breakpoints-always-inserted mode, have to remove
2670 them before detaching. */
2671 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2672
2673 prepare_for_detach ();
2674
2675 for (t = current_target.beneath; t != NULL; t = t->beneath)
2676 {
2677 if (t->to_detach != NULL)
2678 {
2679 t->to_detach (t, args, from_tty);
2680 if (targetdebug)
2681 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2682 args, from_tty);
2683 return;
2684 }
2685 }
2686
2687 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2688 }
2689
2690 void
2691 target_disconnect (char *args, int from_tty)
2692 {
2693 struct target_ops *t;
2694
2695 /* If we're in breakpoints-always-inserted mode or if breakpoints
2696 are global across processes, we have to remove them before
2697 disconnecting. */
2698 remove_breakpoints ();
2699
2700 for (t = current_target.beneath; t != NULL; t = t->beneath)
2701 if (t->to_disconnect != NULL)
2702 {
2703 if (targetdebug)
2704 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2705 args, from_tty);
2706 t->to_disconnect (t, args, from_tty);
2707 return;
2708 }
2709
2710 tcomplain ();
2711 }
2712
2713 ptid_t
2714 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2715 {
2716 struct target_ops *t;
2717 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2718 status, options);
2719
2720 if (targetdebug)
2721 {
2722 char *status_string;
2723 char *options_string;
2724
2725 status_string = target_waitstatus_to_string (status);
2726 options_string = target_options_to_string (options);
2727 fprintf_unfiltered (gdb_stdlog,
2728 "target_wait (%d, status, options={%s})"
2729 " = %d, %s\n",
2730 ptid_get_pid (ptid), options_string,
2731 ptid_get_pid (retval), status_string);
2732 xfree (status_string);
2733 xfree (options_string);
2734 }
2735
2736 return retval;
2737 }
2738
2739 char *
2740 target_pid_to_str (ptid_t ptid)
2741 {
2742 struct target_ops *t;
2743
2744 for (t = current_target.beneath; t != NULL; t = t->beneath)
2745 {
2746 if (t->to_pid_to_str != NULL)
2747 return (*t->to_pid_to_str) (t, ptid);
2748 }
2749
2750 return normal_pid_to_str (ptid);
2751 }
2752
2753 char *
2754 target_thread_name (struct thread_info *info)
2755 {
2756 struct target_ops *t;
2757
2758 for (t = current_target.beneath; t != NULL; t = t->beneath)
2759 {
2760 if (t->to_thread_name != NULL)
2761 return (*t->to_thread_name) (info);
2762 }
2763
2764 return NULL;
2765 }
2766
2767 void
2768 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2769 {
2770 struct target_ops *t;
2771
2772 target_dcache_invalidate ();
2773
2774 current_target.to_resume (&current_target, ptid, step, signal);
2775 if (targetdebug)
2776 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2777 ptid_get_pid (ptid),
2778 step ? "step" : "continue",
2779 gdb_signal_to_name (signal));
2780
2781 registers_changed_ptid (ptid);
2782 set_executing (ptid, 1);
2783 set_running (ptid, 1);
2784 clear_inline_frame_state (ptid);
2785 }
2786
2787 void
2788 target_pass_signals (int numsigs, unsigned char *pass_signals)
2789 {
2790 struct target_ops *t;
2791
2792 for (t = current_target.beneath; t != NULL; t = t->beneath)
2793 {
2794 if (t->to_pass_signals != NULL)
2795 {
2796 if (targetdebug)
2797 {
2798 int i;
2799
2800 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2801 numsigs);
2802
2803 for (i = 0; i < numsigs; i++)
2804 if (pass_signals[i])
2805 fprintf_unfiltered (gdb_stdlog, " %s",
2806 gdb_signal_to_name (i));
2807
2808 fprintf_unfiltered (gdb_stdlog, " })\n");
2809 }
2810
2811 (*t->to_pass_signals) (numsigs, pass_signals);
2812 return;
2813 }
2814 }
2815 }
2816
2817 void
2818 target_program_signals (int numsigs, unsigned char *program_signals)
2819 {
2820 struct target_ops *t;
2821
2822 for (t = current_target.beneath; t != NULL; t = t->beneath)
2823 {
2824 if (t->to_program_signals != NULL)
2825 {
2826 if (targetdebug)
2827 {
2828 int i;
2829
2830 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2831 numsigs);
2832
2833 for (i = 0; i < numsigs; i++)
2834 if (program_signals[i])
2835 fprintf_unfiltered (gdb_stdlog, " %s",
2836 gdb_signal_to_name (i));
2837
2838 fprintf_unfiltered (gdb_stdlog, " })\n");
2839 }
2840
2841 (*t->to_program_signals) (numsigs, program_signals);
2842 return;
2843 }
2844 }
2845 }
2846
2847 /* Look through the list of possible targets for a target that can
2848 follow forks. */
2849
2850 int
2851 target_follow_fork (int follow_child, int detach_fork)
2852 {
2853 struct target_ops *t;
2854
2855 for (t = current_target.beneath; t != NULL; t = t->beneath)
2856 {
2857 if (t->to_follow_fork != NULL)
2858 {
2859 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2860
2861 if (targetdebug)
2862 fprintf_unfiltered (gdb_stdlog,
2863 "target_follow_fork (%d, %d) = %d\n",
2864 follow_child, detach_fork, retval);
2865 return retval;
2866 }
2867 }
2868
2869 /* Some target returned a fork event, but did not know how to follow it. */
2870 internal_error (__FILE__, __LINE__,
2871 _("could not find a target to follow fork"));
2872 }
2873
2874 void
2875 target_mourn_inferior (void)
2876 {
2877 struct target_ops *t;
2878
2879 for (t = current_target.beneath; t != NULL; t = t->beneath)
2880 {
2881 if (t->to_mourn_inferior != NULL)
2882 {
2883 t->to_mourn_inferior (t);
2884 if (targetdebug)
2885 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2886
2887 /* We no longer need to keep handles on any of the object files.
2888 Make sure to release them to avoid unnecessarily locking any
2889 of them while we're not actually debugging. */
2890 bfd_cache_close_all ();
2891
2892 return;
2893 }
2894 }
2895
2896 internal_error (__FILE__, __LINE__,
2897 _("could not find a target to follow mourn inferior"));
2898 }
2899
2900 /* Look for a target which can describe architectural features, starting
2901 from TARGET. If we find one, return its description. */
2902
2903 const struct target_desc *
2904 target_read_description (struct target_ops *target)
2905 {
2906 struct target_ops *t;
2907
2908 for (t = target; t != NULL; t = t->beneath)
2909 if (t->to_read_description != NULL)
2910 {
2911 const struct target_desc *tdesc;
2912
2913 tdesc = t->to_read_description (t);
2914 if (tdesc)
2915 return tdesc;
2916 }
2917
2918 return NULL;
2919 }
2920
2921 /* The default implementation of to_search_memory.
2922 This implements a basic search of memory, reading target memory and
2923 performing the search here (as opposed to performing the search in on the
2924 target side with, for example, gdbserver). */
2925
int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* Allocate PATTERN_LEN - 1 bytes of slack beyond the chunk so a
     match straddling two chunks is still found in one memmem call.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  /* Plain malloc (not xmalloc) so that allocation failure can be
     reported via error instead of aborting.  */
  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
3028
3029 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
3030 sequence of bytes in PATTERN with length PATTERN_LEN.
3031
3032 The result is 1 if found, 0 if not found, and -1 if there was an error
3033 requiring halting of the search (e.g. memory read error).
3034 If the pattern is found the address is recorded in FOUND_ADDRP. */
3035
3036 int
3037 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
3038 const gdb_byte *pattern, ULONGEST pattern_len,
3039 CORE_ADDR *found_addrp)
3040 {
3041 struct target_ops *t;
3042 int found;
3043
3044 /* We don't use INHERIT to set current_target.to_search_memory,
3045 so we have to scan the target stack and handle targetdebug
3046 ourselves. */
3047
3048 if (targetdebug)
3049 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
3050 hex_string (start_addr));
3051
3052 for (t = current_target.beneath; t != NULL; t = t->beneath)
3053 if (t->to_search_memory != NULL)
3054 break;
3055
3056 if (t != NULL)
3057 {
3058 found = t->to_search_memory (t, start_addr, search_space_len,
3059 pattern, pattern_len, found_addrp);
3060 }
3061 else
3062 {
3063 /* If a special version of to_search_memory isn't available, use the
3064 simple version. */
3065 found = simple_search_memory (current_target.beneath,
3066 start_addr, search_space_len,
3067 pattern, pattern_len, found_addrp);
3068 }
3069
3070 if (targetdebug)
3071 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3072
3073 return found;
3074 }
3075
3076 /* Look through the currently pushed targets. If none of them will
3077 be able to restart the currently running process, issue an error
3078 message. */
3079
3080 void
3081 target_require_runnable (void)
3082 {
3083 struct target_ops *t;
3084
3085 for (t = target_stack; t != NULL; t = t->beneath)
3086 {
3087 /* If this target knows how to create a new program, then
3088 assume we will still be able to after killing the current
3089 one. Either killing and mourning will not pop T, or else
3090 find_default_run_target will find it again. */
3091 if (t->to_create_inferior != NULL)
3092 return;
3093
3094 /* Do not worry about thread_stratum targets that can not
3095 create inferiors. Assume they will be pushed again if
3096 necessary, and continue to the process_stratum. */
3097 if (t->to_stratum == thread_stratum
3098 || t->to_stratum == arch_stratum)
3099 continue;
3100
3101 error (_("The \"%s\" target does not support \"run\". "
3102 "Try \"help target\" or \"continue\"."),
3103 t->to_shortname);
3104 }
3105
3106 /* This function is only called if the target is running. In that
3107 case there should have been a process_stratum target and it
3108 should either know how to create inferiors, or not... */
3109 internal_error (__FILE__, __LINE__, _("No targets found"));
3110 }
3111
3112 /* Look through the list of possible targets for a target that can
3113 execute a run or attach command without any other data. This is
3114 used to locate the default process stratum.
3115
3116 If DO_MESG is not NULL, the result is always valid (error() is
3117 called for errors); else, return NULL on error. */
3118
3119 static struct target_ops *
3120 find_default_run_target (char *do_mesg)
3121 {
3122 struct target_ops **t;
3123 struct target_ops *runable = NULL;
3124 int count;
3125
3126 count = 0;
3127
3128 for (t = target_structs; t < target_structs + target_struct_size;
3129 ++t)
3130 {
3131 if ((*t)->to_can_run && target_can_run (*t))
3132 {
3133 runable = *t;
3134 ++count;
3135 }
3136 }
3137
3138 if (count != 1)
3139 {
3140 if (do_mesg)
3141 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3142 else
3143 return NULL;
3144 }
3145
3146 return runable;
3147 }
3148
3149 void
3150 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3151 {
3152 struct target_ops *t;
3153
3154 t = find_default_run_target ("attach");
3155 (t->to_attach) (t, args, from_tty);
3156 return;
3157 }
3158
3159 void
3160 find_default_create_inferior (struct target_ops *ops,
3161 char *exec_file, char *allargs, char **env,
3162 int from_tty)
3163 {
3164 struct target_ops *t;
3165
3166 t = find_default_run_target ("run");
3167 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3168 return;
3169 }
3170
3171 static int
3172 find_default_can_async_p (struct target_ops *ignore)
3173 {
3174 struct target_ops *t;
3175
3176 /* This may be called before the target is pushed on the stack;
3177 look for the default process stratum. If there's none, gdb isn't
3178 configured with a native debugger, and target remote isn't
3179 connected yet. */
3180 t = find_default_run_target (NULL);
3181 if (t && t->to_can_async_p != delegate_can_async_p)
3182 return (t->to_can_async_p) (t);
3183 return 0;
3184 }
3185
3186 static int
3187 find_default_is_async_p (struct target_ops *ignore)
3188 {
3189 struct target_ops *t;
3190
3191 /* This may be called before the target is pushed on the stack;
3192 look for the default process stratum. If there's none, gdb isn't
3193 configured with a native debugger, and target remote isn't
3194 connected yet. */
3195 t = find_default_run_target (NULL);
3196 if (t && t->to_is_async_p != delegate_is_async_p)
3197 return (t->to_is_async_p) (t);
3198 return 0;
3199 }
3200
3201 static int
3202 find_default_supports_non_stop (void)
3203 {
3204 struct target_ops *t;
3205
3206 t = find_default_run_target (NULL);
3207 if (t && t->to_supports_non_stop)
3208 return (t->to_supports_non_stop) ();
3209 return 0;
3210 }
3211
3212 int
3213 target_supports_non_stop (void)
3214 {
3215 struct target_ops *t;
3216
3217 for (t = &current_target; t != NULL; t = t->beneath)
3218 if (t->to_supports_non_stop)
3219 return t->to_supports_non_stop ();
3220
3221 return 0;
3222 }
3223
3224 /* Implement the "info proc" command. */
3225
3226 int
3227 target_info_proc (char *args, enum info_proc_what what)
3228 {
3229 struct target_ops *t;
3230
3231 /* If we're already connected to something that can get us OS
3232 related data, use it. Otherwise, try using the native
3233 target. */
3234 if (current_target.to_stratum >= process_stratum)
3235 t = current_target.beneath;
3236 else
3237 t = find_default_run_target (NULL);
3238
3239 for (; t != NULL; t = t->beneath)
3240 {
3241 if (t->to_info_proc != NULL)
3242 {
3243 t->to_info_proc (t, args, what);
3244
3245 if (targetdebug)
3246 fprintf_unfiltered (gdb_stdlog,
3247 "target_info_proc (\"%s\", %d)\n", args, what);
3248
3249 return 1;
3250 }
3251 }
3252
3253 return 0;
3254 }
3255
3256 static int
3257 find_default_supports_disable_randomization (void)
3258 {
3259 struct target_ops *t;
3260
3261 t = find_default_run_target (NULL);
3262 if (t && t->to_supports_disable_randomization)
3263 return (t->to_supports_disable_randomization) ();
3264 return 0;
3265 }
3266
3267 int
3268 target_supports_disable_randomization (void)
3269 {
3270 struct target_ops *t;
3271
3272 for (t = &current_target; t != NULL; t = t->beneath)
3273 if (t->to_supports_disable_randomization)
3274 return t->to_supports_disable_randomization ();
3275
3276 return 0;
3277 }
3278
3279 char *
3280 target_get_osdata (const char *type)
3281 {
3282 struct target_ops *t;
3283
3284 /* If we're already connected to something that can get us OS
3285 related data, use it. Otherwise, try using the native
3286 target. */
3287 if (current_target.to_stratum >= process_stratum)
3288 t = current_target.beneath;
3289 else
3290 t = find_default_run_target ("get OS data");
3291
3292 if (!t)
3293 return NULL;
3294
3295 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3296 }
3297
/* Determine the current address space of thread PTID.  */

/* The first target on the stack implementing to_thread_address_space
   wins; otherwise fall back to the inferior's main address space.
   Never returns NULL -- it is an internal error if no address space
   can be determined.  */

struct address_space *
target_thread_address_space (ptid_t ptid)
{
  struct address_space *aspace;
  struct inferior *inf;
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_address_space != NULL)
	{
	  aspace = t->to_thread_address_space (t, ptid);
	  /* The method contract requires a non-NULL result.  */
	  gdb_assert (aspace);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_thread_address_space (%s) = %d\n",
				target_pid_to_str (ptid),
				address_space_num (aspace));
	  return aspace;
	}
    }

  /* Fall-back to the "main" address space of the inferior.  */
  inf = find_inferior_pid (ptid_get_pid (ptid));

  if (inf == NULL || inf->aspace == NULL)
    internal_error (__FILE__, __LINE__,
		    _("Can't determine the current "
		      "address space of thread %s\n"),
		    target_pid_to_str (ptid));

  return inf->aspace;
}
3334
3335
3336 /* Target file operations. */
3337
3338 static struct target_ops *
3339 default_fileio_target (void)
3340 {
3341 /* If we're already connected to something that can perform
3342 file I/O, use it. Otherwise, try using the native target. */
3343 if (current_target.to_stratum >= process_stratum)
3344 return current_target.beneath;
3345 else
3346 return find_default_run_target ("file I/O");
3347 }
3348
3349 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3350 target file descriptor, or -1 if an error occurs (and set
3351 *TARGET_ERRNO). */
3352 int
3353 target_fileio_open (const char *filename, int flags, int mode,
3354 int *target_errno)
3355 {
3356 struct target_ops *t;
3357
3358 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3359 {
3360 if (t->to_fileio_open != NULL)
3361 {
3362 int fd = t->to_fileio_open (filename, flags, mode, target_errno);
3363
3364 if (targetdebug)
3365 fprintf_unfiltered (gdb_stdlog,
3366 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3367 filename, flags, mode,
3368 fd, fd != -1 ? 0 : *target_errno);
3369 return fd;
3370 }
3371 }
3372
3373 *target_errno = FILEIO_ENOSYS;
3374 return -1;
3375 }
3376
3377 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3378 Return the number of bytes written, or -1 if an error occurs
3379 (and set *TARGET_ERRNO). */
3380 int
3381 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3382 ULONGEST offset, int *target_errno)
3383 {
3384 struct target_ops *t;
3385
3386 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3387 {
3388 if (t->to_fileio_pwrite != NULL)
3389 {
3390 int ret = t->to_fileio_pwrite (fd, write_buf, len, offset,
3391 target_errno);
3392
3393 if (targetdebug)
3394 fprintf_unfiltered (gdb_stdlog,
3395 "target_fileio_pwrite (%d,...,%d,%s) "
3396 "= %d (%d)\n",
3397 fd, len, pulongest (offset),
3398 ret, ret != -1 ? 0 : *target_errno);
3399 return ret;
3400 }
3401 }
3402
3403 *target_errno = FILEIO_ENOSYS;
3404 return -1;
3405 }
3406
3407 /* Read up to LEN bytes FD on the target into READ_BUF.
3408 Return the number of bytes read, or -1 if an error occurs
3409 (and set *TARGET_ERRNO). */
3410 int
3411 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3412 ULONGEST offset, int *target_errno)
3413 {
3414 struct target_ops *t;
3415
3416 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3417 {
3418 if (t->to_fileio_pread != NULL)
3419 {
3420 int ret = t->to_fileio_pread (fd, read_buf, len, offset,
3421 target_errno);
3422
3423 if (targetdebug)
3424 fprintf_unfiltered (gdb_stdlog,
3425 "target_fileio_pread (%d,...,%d,%s) "
3426 "= %d (%d)\n",
3427 fd, len, pulongest (offset),
3428 ret, ret != -1 ? 0 : *target_errno);
3429 return ret;
3430 }
3431 }
3432
3433 *target_errno = FILEIO_ENOSYS;
3434 return -1;
3435 }
3436
3437 /* Close FD on the target. Return 0, or -1 if an error occurs
3438 (and set *TARGET_ERRNO). */
3439 int
3440 target_fileio_close (int fd, int *target_errno)
3441 {
3442 struct target_ops *t;
3443
3444 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3445 {
3446 if (t->to_fileio_close != NULL)
3447 {
3448 int ret = t->to_fileio_close (fd, target_errno);
3449
3450 if (targetdebug)
3451 fprintf_unfiltered (gdb_stdlog,
3452 "target_fileio_close (%d) = %d (%d)\n",
3453 fd, ret, ret != -1 ? 0 : *target_errno);
3454 return ret;
3455 }
3456 }
3457
3458 *target_errno = FILEIO_ENOSYS;
3459 return -1;
3460 }
3461
3462 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3463 occurs (and set *TARGET_ERRNO). */
3464 int
3465 target_fileio_unlink (const char *filename, int *target_errno)
3466 {
3467 struct target_ops *t;
3468
3469 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3470 {
3471 if (t->to_fileio_unlink != NULL)
3472 {
3473 int ret = t->to_fileio_unlink (filename, target_errno);
3474
3475 if (targetdebug)
3476 fprintf_unfiltered (gdb_stdlog,
3477 "target_fileio_unlink (%s) = %d (%d)\n",
3478 filename, ret, ret != -1 ? 0 : *target_errno);
3479 return ret;
3480 }
3481 }
3482
3483 *target_errno = FILEIO_ENOSYS;
3484 return -1;
3485 }
3486
3487 /* Read value of symbolic link FILENAME on the target. Return a
3488 null-terminated string allocated via xmalloc, or NULL if an error
3489 occurs (and set *TARGET_ERRNO). */
3490 char *
3491 target_fileio_readlink (const char *filename, int *target_errno)
3492 {
3493 struct target_ops *t;
3494
3495 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3496 {
3497 if (t->to_fileio_readlink != NULL)
3498 {
3499 char *ret = t->to_fileio_readlink (filename, target_errno);
3500
3501 if (targetdebug)
3502 fprintf_unfiltered (gdb_stdlog,
3503 "target_fileio_readlink (%s) = %s (%d)\n",
3504 filename, ret? ret : "(nil)",
3505 ret? 0 : *target_errno);
3506 return ret;
3507 }
3508 }
3509
3510 *target_errno = FILEIO_ENOSYS;
3511 return NULL;
3512 }
3513
/* Cleanup callback wrapping target_fileio_close.  OPAQUE points to
   the target file descriptor to close; close errors are ignored.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int target_errno;

  target_fileio_close (*(int *) opaque, &target_errno);
}
3522
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Make sure FD is closed on every exit from the loop below.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Read into the tail of BUF, always keeping PADDING bytes free
	 at the end for the caller.  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  do_cleanups (close_cleanup);
	  /* NOTE: *BUF_P is only written when something was read;
	     callers must not look at it on a zero return.  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
3586
3587 /* Read target file FILENAME. Store the result in *BUF_P and return
3588 the size of the transferred data. See the declaration in "target.h"
3589 function for more information about the return value. */
3590
3591 LONGEST
3592 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3593 {
3594 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3595 }
3596
/* Read target file FILENAME.  The result is NUL-terminated and
   returned as a string, allocated using xmalloc.  If an error occurs
   or the transfer is unsupported, NULL is returned.  Empty objects
   are returned as allocated but empty strings.  A warning is issued
   if the result contains any embedded NUL bytes.  */

char *
target_fileio_read_stralloc (const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* The padding byte (1) reserves room for the terminating NUL
     written below.  */
  transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  /* BUFFER is not written by the helper on a zero-size transfer.  */
  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target file %s "
		   "contained unexpected null characters"),
		 filename);
	break;
      }

  return bufstr;
}
3633
3634
3635 static int
3636 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3637 {
3638 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3639 }
3640
3641 static int
3642 default_watchpoint_addr_within_range (struct target_ops *target,
3643 CORE_ADDR addr,
3644 CORE_ADDR start, int length)
3645 {
3646 return addr >= start && addr < start + length;
3647 }
3648
3649 static struct gdbarch *
3650 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3651 {
3652 return target_gdbarch ();
3653 }
3654
/* Helper usable (via casts in init_dummy_target) as a default target
   method: always returns 0.  */

static int
return_zero (void)
{
  const int value = 0;

  return value;
}
3660
/* Helper usable as a default target method: always returns 1.  */

static int
return_one (void)
{
  const int value = 1;

  return value;
}
3666
/* Helper usable as a default target method: always returns -1.  */

static int
return_minus_one (void)
{
  const int value = -1;

  return value;
}
3672
3673 static void *
3674 return_null (void)
3675 {
3676 return 0;
3677 }
3678
3679 /*
3680 * Find the next target down the stack from the specified target.
3681 */
3682
3683 struct target_ops *
3684 find_target_beneath (struct target_ops *t)
3685 {
3686 return t->beneath;
3687 }
3688
3689 /* See target.h. */
3690
3691 struct target_ops *
3692 find_target_at (enum strata stratum)
3693 {
3694 struct target_ops *t;
3695
3696 for (t = current_target.beneath; t != NULL; t = t->beneath)
3697 if (t->to_stratum == stratum)
3698 return t;
3699
3700 return NULL;
3701 }
3702
3703 \f
/* The inferior process has died.  Long live the inferior!  */

/* Generic to_mourn_inferior implementation: clear inferior_ptid,
   delete the inferior and its breakpoints, and reset per-process
   caches.  The statement order below is deliberate -- see the inline
   comments.  */
void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3738 \f
3739 /* Convert a normal process ID to a string. Returns the string in a
3740 static buffer. */
3741
3742 char *
3743 normal_pid_to_str (ptid_t ptid)
3744 {
3745 static char buf[32];
3746
3747 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3748 return buf;
3749 }
3750
/* The dummy target's to_pid_to_str method: use the normal process
   formatting.  OPS is unused.  */
static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3756
/* Error-catcher for target_find_memory_regions.  */
static int
dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Not reached; error does not return.  */
  return 0;
}
3764
/* Error-catcher for target_make_corefile_notes.  */
static char *
dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Not reached; error does not return.  */
  return NULL;
}
3772
/* Error-catcher for target_get_bookmark.  */
static gdb_byte *
dummy_get_bookmark (char *ignore1, int ignore2)
{
  tcomplain ();
  /* Not reached; tcomplain is declared ATTRIBUTE_NORETURN.  */
  return NULL;
}
3780
/* Error-catcher for target_goto_bookmark.  */
static void
dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
{
  tcomplain ();
}
3787
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  /* "attach"/"run" on the dummy target look up the real run target.  */
  dummy_target.to_attach = find_default_attach;
  dummy_target.to_detach =
    (void (*)(struct target_ops *, const char *, int))target_ignore;
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  /* The dummy_* slots raise "not implemented" errors when used.  */
  dummy_target.to_find_memory_regions = dummy_find_memory_regions;
  dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
  dummy_target.to_get_bookmark = dummy_get_bookmark;
  dummy_target.to_goto_bookmark = dummy_goto_bookmark;
  /* NOTE(review): calling return_zero through these casts to
     incompatible function-pointer types is formally undefined
     behavior in ISO C -- confirm whether these slots should get
     dedicated helpers with matching signatures.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  /* Fill every remaining slot with a delegating default.  */
  install_dummy_methods (&dummy_target);
}
3820 \f
/* Debug-wrapper for the to_open method: forward to the real target,
   then log the call to gdb_stdlog.  */
static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  /* NOTE(review): if ARGS can be NULL here, printing it with %s is
     undefined behavior -- confirm against callers of to_open.  */
  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}
3828
/* Close TARG, which must not currently be pushed on the target
   stack.  to_xclose takes precedence over to_close when both are
   set; a target providing neither is simply not closed.  */
void
target_close (struct target_ops *targ)
{
  gdb_assert (!target_is_pushed (targ));

  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3842
3843 void
3844 target_attach (char *args, int from_tty)
3845 {
3846 struct target_ops *t;
3847
3848 for (t = current_target.beneath; t != NULL; t = t->beneath)
3849 {
3850 if (t->to_attach != NULL)
3851 {
3852 t->to_attach (t, args, from_tty);
3853 if (targetdebug)
3854 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3855 args, from_tty);
3856 return;
3857 }
3858 }
3859
3860 internal_error (__FILE__, __LINE__,
3861 _("could not find a target to attach"));
3862 }
3863
3864 int
3865 target_thread_alive (ptid_t ptid)
3866 {
3867 struct target_ops *t;
3868
3869 for (t = current_target.beneath; t != NULL; t = t->beneath)
3870 {
3871 if (t->to_thread_alive != NULL)
3872 {
3873 int retval;
3874
3875 retval = t->to_thread_alive (t, ptid);
3876 if (targetdebug)
3877 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3878 ptid_get_pid (ptid), retval);
3879
3880 return retval;
3881 }
3882 }
3883
3884 return 0;
3885 }
3886
3887 void
3888 target_find_new_threads (void)
3889 {
3890 struct target_ops *t;
3891
3892 for (t = current_target.beneath; t != NULL; t = t->beneath)
3893 {
3894 if (t->to_find_new_threads != NULL)
3895 {
3896 t->to_find_new_threads (t);
3897 if (targetdebug)
3898 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3899
3900 return;
3901 }
3902 }
3903 }
3904
3905 void
3906 target_stop (ptid_t ptid)
3907 {
3908 if (!may_stop)
3909 {
3910 warning (_("May not interrupt or stop the target, ignoring attempt"));
3911 return;
3912 }
3913
3914 (*current_target.to_stop) (ptid);
3915 }
3916
/* Debug-wrapper for to_post_attach: forward to the real target, then
   log the call to gdb_stdlog.  */
static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3924
3925 /* Concatenate ELEM to LIST, a comma separate list, and return the
3926 result. The LIST incoming argument is released. */
3927
3928 static char *
3929 str_comma_list_concat_elem (char *list, const char *elem)
3930 {
3931 if (list == NULL)
3932 return xstrdup (elem);
3933 else
3934 return reconcat (list, list, ", ", elem, (char *) NULL);
3935 }
3936
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  int present = (*target_options & opt) != 0;

  if (present)
    {
      *target_options &= ~opt;
      ret = str_comma_list_concat_elem (ret, opt_str);
    }

  return ret;
}
3954
/* Return a freshly-allocated comma-separated string naming the bits
   set in TARGET_OPTIONS (e.g. "TARGET_WNOHANG"); any leftover unknown
   bits are reported as "unknown???".  The caller owns the result.  */
char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3972
/* Log a register access performed by FUNC on REGCACHE's register
   REGNO to gdb_stdlog: the register name (or number), its raw bytes,
   and -- when it fits in a LONGEST -- its address/integer rendering.  */
static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the symbolic register name when one exists.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      /* Dump the raw bytes in storage order.  */
      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
4009
4010 void
4011 target_fetch_registers (struct regcache *regcache, int regno)
4012 {
4013 struct target_ops *t;
4014
4015 for (t = current_target.beneath; t != NULL; t = t->beneath)
4016 {
4017 if (t->to_fetch_registers != NULL)
4018 {
4019 t->to_fetch_registers (t, regcache, regno);
4020 if (targetdebug)
4021 debug_print_register ("target_fetch_registers", regcache, regno);
4022 return;
4023 }
4024 }
4025 }
4026
4027 void
4028 target_store_registers (struct regcache *regcache, int regno)
4029 {
4030 struct target_ops *t;
4031
4032 if (!may_write_registers)
4033 error (_("Writing to registers is not allowed (regno %d)"), regno);
4034
4035 current_target.to_store_registers (&current_target, regcache, regno);
4036 if (targetdebug)
4037 {
4038 debug_print_register ("target_store_registers", regcache, regno);
4039 }
4040 }
4041
4042 int
4043 target_core_of_thread (ptid_t ptid)
4044 {
4045 struct target_ops *t;
4046
4047 for (t = current_target.beneath; t != NULL; t = t->beneath)
4048 {
4049 if (t->to_core_of_thread != NULL)
4050 {
4051 int retval = t->to_core_of_thread (t, ptid);
4052
4053 if (targetdebug)
4054 fprintf_unfiltered (gdb_stdlog,
4055 "target_core_of_thread (%d) = %d\n",
4056 ptid_get_pid (ptid), retval);
4057 return retval;
4058 }
4059 }
4060
4061 return -1;
4062 }
4063
4064 int
4065 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4066 {
4067 struct target_ops *t;
4068
4069 for (t = current_target.beneath; t != NULL; t = t->beneath)
4070 {
4071 if (t->to_verify_memory != NULL)
4072 {
4073 int retval = t->to_verify_memory (t, data, memaddr, size);
4074
4075 if (targetdebug)
4076 fprintf_unfiltered (gdb_stdlog,
4077 "target_verify_memory (%s, %s) = %d\n",
4078 paddress (target_gdbarch (), memaddr),
4079 pulongest (size),
4080 retval);
4081 return retval;
4082 }
4083 }
4084
4085 tcomplain ();
4086 }
4087
4088 /* The documentation for this function is in its prototype declaration in
4089 target.h. */
4090
4091 int
4092 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4093 {
4094 struct target_ops *t;
4095
4096 for (t = current_target.beneath; t != NULL; t = t->beneath)
4097 if (t->to_insert_mask_watchpoint != NULL)
4098 {
4099 int ret;
4100
4101 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4102
4103 if (targetdebug)
4104 fprintf_unfiltered (gdb_stdlog, "\
4105 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4106 core_addr_to_string (addr),
4107 core_addr_to_string (mask), rw, ret);
4108
4109 return ret;
4110 }
4111
4112 return 1;
4113 }
4114
4115 /* The documentation for this function is in its prototype declaration in
4116 target.h. */
4117
4118 int
4119 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4120 {
4121 struct target_ops *t;
4122
4123 for (t = current_target.beneath; t != NULL; t = t->beneath)
4124 if (t->to_remove_mask_watchpoint != NULL)
4125 {
4126 int ret;
4127
4128 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4129
4130 if (targetdebug)
4131 fprintf_unfiltered (gdb_stdlog, "\
4132 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4133 core_addr_to_string (addr),
4134 core_addr_to_string (mask), rw, ret);
4135
4136 return ret;
4137 }
4138
4139 return 1;
4140 }
4141
4142 /* The documentation for this function is in its prototype declaration
4143 in target.h. */
4144
4145 int
4146 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4147 {
4148 struct target_ops *t;
4149
4150 for (t = current_target.beneath; t != NULL; t = t->beneath)
4151 if (t->to_masked_watch_num_registers != NULL)
4152 return t->to_masked_watch_num_registers (t, addr, mask);
4153
4154 return -1;
4155 }
4156
4157 /* The documentation for this function is in its prototype declaration
4158 in target.h. */
4159
4160 int
4161 target_ranged_break_num_registers (void)
4162 {
4163 struct target_ops *t;
4164
4165 for (t = current_target.beneath; t != NULL; t = t->beneath)
4166 if (t->to_ranged_break_num_registers != NULL)
4167 return t->to_ranged_break_num_registers (t);
4168
4169 return -1;
4170 }
4171
4172 /* See target.h. */
4173
4174 struct btrace_target_info *
4175 target_enable_btrace (ptid_t ptid)
4176 {
4177 struct target_ops *t;
4178
4179 for (t = current_target.beneath; t != NULL; t = t->beneath)
4180 if (t->to_enable_btrace != NULL)
4181 return t->to_enable_btrace (ptid);
4182
4183 tcomplain ();
4184 return NULL;
4185 }
4186
4187 /* See target.h. */
4188
4189 void
4190 target_disable_btrace (struct btrace_target_info *btinfo)
4191 {
4192 struct target_ops *t;
4193
4194 for (t = current_target.beneath; t != NULL; t = t->beneath)
4195 if (t->to_disable_btrace != NULL)
4196 {
4197 t->to_disable_btrace (btinfo);
4198 return;
4199 }
4200
4201 tcomplain ();
4202 }
4203
4204 /* See target.h. */
4205
4206 void
4207 target_teardown_btrace (struct btrace_target_info *btinfo)
4208 {
4209 struct target_ops *t;
4210
4211 for (t = current_target.beneath; t != NULL; t = t->beneath)
4212 if (t->to_teardown_btrace != NULL)
4213 {
4214 t->to_teardown_btrace (btinfo);
4215 return;
4216 }
4217
4218 tcomplain ();
4219 }
4220
4221 /* See target.h. */
4222
4223 enum btrace_error
4224 target_read_btrace (VEC (btrace_block_s) **btrace,
4225 struct btrace_target_info *btinfo,
4226 enum btrace_read_type type)
4227 {
4228 struct target_ops *t;
4229
4230 for (t = current_target.beneath; t != NULL; t = t->beneath)
4231 if (t->to_read_btrace != NULL)
4232 return t->to_read_btrace (btrace, btinfo, type);
4233
4234 tcomplain ();
4235 return BTRACE_ERR_NOT_SUPPORTED;
4236 }
4237
4238 /* See target.h. */
4239
4240 void
4241 target_stop_recording (void)
4242 {
4243 struct target_ops *t;
4244
4245 for (t = current_target.beneath; t != NULL; t = t->beneath)
4246 if (t->to_stop_recording != NULL)
4247 {
4248 t->to_stop_recording ();
4249 return;
4250 }
4251
4252 /* This is optional. */
4253 }
4254
4255 /* See target.h. */
4256
4257 void
4258 target_info_record (void)
4259 {
4260 struct target_ops *t;
4261
4262 for (t = current_target.beneath; t != NULL; t = t->beneath)
4263 if (t->to_info_record != NULL)
4264 {
4265 t->to_info_record ();
4266 return;
4267 }
4268
4269 tcomplain ();
4270 }
4271
4272 /* See target.h. */
4273
4274 void
4275 target_save_record (const char *filename)
4276 {
4277 struct target_ops *t;
4278
4279 for (t = current_target.beneath; t != NULL; t = t->beneath)
4280 if (t->to_save_record != NULL)
4281 {
4282 t->to_save_record (filename);
4283 return;
4284 }
4285
4286 tcomplain ();
4287 }
4288
4289 /* See target.h. */
4290
4291 int
4292 target_supports_delete_record (void)
4293 {
4294 struct target_ops *t;
4295
4296 for (t = current_target.beneath; t != NULL; t = t->beneath)
4297 if (t->to_delete_record != NULL)
4298 return 1;
4299
4300 return 0;
4301 }
4302
4303 /* See target.h. */
4304
4305 void
4306 target_delete_record (void)
4307 {
4308 struct target_ops *t;
4309
4310 for (t = current_target.beneath; t != NULL; t = t->beneath)
4311 if (t->to_delete_record != NULL)
4312 {
4313 t->to_delete_record ();
4314 return;
4315 }
4316
4317 tcomplain ();
4318 }
4319
4320 /* See target.h. */
4321
4322 int
4323 target_record_is_replaying (void)
4324 {
4325 struct target_ops *t;
4326
4327 for (t = current_target.beneath; t != NULL; t = t->beneath)
4328 if (t->to_record_is_replaying != NULL)
4329 return t->to_record_is_replaying ();
4330
4331 return 0;
4332 }
4333
4334 /* See target.h. */
4335
4336 void
4337 target_goto_record_begin (void)
4338 {
4339 struct target_ops *t;
4340
4341 for (t = current_target.beneath; t != NULL; t = t->beneath)
4342 if (t->to_goto_record_begin != NULL)
4343 {
4344 t->to_goto_record_begin ();
4345 return;
4346 }
4347
4348 tcomplain ();
4349 }
4350
4351 /* See target.h. */
4352
4353 void
4354 target_goto_record_end (void)
4355 {
4356 struct target_ops *t;
4357
4358 for (t = current_target.beneath; t != NULL; t = t->beneath)
4359 if (t->to_goto_record_end != NULL)
4360 {
4361 t->to_goto_record_end ();
4362 return;
4363 }
4364
4365 tcomplain ();
4366 }
4367
4368 /* See target.h. */
4369
4370 void
4371 target_goto_record (ULONGEST insn)
4372 {
4373 struct target_ops *t;
4374
4375 for (t = current_target.beneath; t != NULL; t = t->beneath)
4376 if (t->to_goto_record != NULL)
4377 {
4378 t->to_goto_record (insn);
4379 return;
4380 }
4381
4382 tcomplain ();
4383 }
4384
4385 /* See target.h. */
4386
4387 void
4388 target_insn_history (int size, int flags)
4389 {
4390 struct target_ops *t;
4391
4392 for (t = current_target.beneath; t != NULL; t = t->beneath)
4393 if (t->to_insn_history != NULL)
4394 {
4395 t->to_insn_history (size, flags);
4396 return;
4397 }
4398
4399 tcomplain ();
4400 }
4401
4402 /* See target.h. */
4403
4404 void
4405 target_insn_history_from (ULONGEST from, int size, int flags)
4406 {
4407 struct target_ops *t;
4408
4409 for (t = current_target.beneath; t != NULL; t = t->beneath)
4410 if (t->to_insn_history_from != NULL)
4411 {
4412 t->to_insn_history_from (from, size, flags);
4413 return;
4414 }
4415
4416 tcomplain ();
4417 }
4418
4419 /* See target.h. */
4420
4421 void
4422 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4423 {
4424 struct target_ops *t;
4425
4426 for (t = current_target.beneath; t != NULL; t = t->beneath)
4427 if (t->to_insn_history_range != NULL)
4428 {
4429 t->to_insn_history_range (begin, end, flags);
4430 return;
4431 }
4432
4433 tcomplain ();
4434 }
4435
4436 /* See target.h. */
4437
4438 void
4439 target_call_history (int size, int flags)
4440 {
4441 struct target_ops *t;
4442
4443 for (t = current_target.beneath; t != NULL; t = t->beneath)
4444 if (t->to_call_history != NULL)
4445 {
4446 t->to_call_history (size, flags);
4447 return;
4448 }
4449
4450 tcomplain ();
4451 }
4452
4453 /* See target.h. */
4454
4455 void
4456 target_call_history_from (ULONGEST begin, int size, int flags)
4457 {
4458 struct target_ops *t;
4459
4460 for (t = current_target.beneath; t != NULL; t = t->beneath)
4461 if (t->to_call_history_from != NULL)
4462 {
4463 t->to_call_history_from (begin, size, flags);
4464 return;
4465 }
4466
4467 tcomplain ();
4468 }
4469
4470 /* See target.h. */
4471
4472 void
4473 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4474 {
4475 struct target_ops *t;
4476
4477 for (t = current_target.beneath; t != NULL; t = t->beneath)
4478 if (t->to_call_history_range != NULL)
4479 {
4480 t->to_call_history_range (begin, end, flags);
4481 return;
4482 }
4483
4484 tcomplain ();
4485 }
4486
/* Debug-wrapper for to_prepare_to_store: forward to the real target,
   then log the call to gdb_stdlog.  */
static void
debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
{
  debug_target.to_prepare_to_store (&debug_target, regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
4494
4495 /* See target.h. */
4496
4497 const struct frame_unwind *
4498 target_get_unwinder (void)
4499 {
4500 struct target_ops *t;
4501
4502 for (t = current_target.beneath; t != NULL; t = t->beneath)
4503 if (t->to_get_unwinder != NULL)
4504 return t->to_get_unwinder;
4505
4506 return NULL;
4507 }
4508
4509 /* See target.h. */
4510
4511 const struct frame_unwind *
4512 target_get_tailcall_unwinder (void)
4513 {
4514 struct target_ops *t;
4515
4516 for (t = current_target.beneath; t != NULL; t = t->beneath)
4517 if (t->to_get_tailcall_unwinder != NULL)
4518 return t->to_get_tailcall_unwinder;
4519
4520 return NULL;
4521 }
4522
4523 /* See target.h. */
4524
4525 CORE_ADDR
4526 forward_target_decr_pc_after_break (struct target_ops *ops,
4527 struct gdbarch *gdbarch)
4528 {
4529 for (; ops != NULL; ops = ops->beneath)
4530 if (ops->to_decr_pc_after_break != NULL)
4531 return ops->to_decr_pc_after_break (ops, gdbarch);
4532
4533 return gdbarch_decr_pc_after_break (gdbarch);
4534 }
4535
4536 /* See target.h. */
4537
4538 CORE_ADDR
4539 target_decr_pc_after_break (struct gdbarch *gdbarch)
4540 {
4541 return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
4542 }
4543
/* Debug-logging wrapper for deprecated_xfer_memory: perform the
   transfer through the real target, then log the request and (on
   success) a hex dump of the bytes transferred.  Returns the number
   of bytes transferred, as reported by the underlying method.  */

static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Start a new output line whenever the *host* address of the
	     byte is 16-byte aligned, so the dump lines up in columns.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* At debug level 1 only the first line of the dump is
		 printed; higher levels dump everything.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4584
/* Debug-logging wrapper for to_files_info: forward to the real target
   method, then record the call in the target debug log.  */

static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
4592
4593 static int
4594 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4595 struct bp_target_info *bp_tgt)
4596 {
4597 int retval;
4598
4599 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
4600
4601 fprintf_unfiltered (gdb_stdlog,
4602 "target_insert_breakpoint (%s, xxx) = %ld\n",
4603 core_addr_to_string (bp_tgt->placed_address),
4604 (unsigned long) retval);
4605 return retval;
4606 }
4607
4608 static int
4609 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4610 struct bp_target_info *bp_tgt)
4611 {
4612 int retval;
4613
4614 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
4615
4616 fprintf_unfiltered (gdb_stdlog,
4617 "target_remove_breakpoint (%s, xxx) = %ld\n",
4618 core_addr_to_string (bp_tgt->placed_address),
4619 (unsigned long) retval);
4620 return retval;
4621 }
4622
4623 static int
4624 debug_to_can_use_hw_breakpoint (struct target_ops *self,
4625 int type, int cnt, int from_tty)
4626 {
4627 int retval;
4628
4629 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
4630 type, cnt, from_tty);
4631
4632 fprintf_unfiltered (gdb_stdlog,
4633 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4634 (unsigned long) type,
4635 (unsigned long) cnt,
4636 (unsigned long) from_tty,
4637 (unsigned long) retval);
4638 return retval;
4639 }
4640
4641 static int
4642 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
4643 {
4644 CORE_ADDR retval;
4645
4646 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
4647
4648 fprintf_unfiltered (gdb_stdlog,
4649 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4650 core_addr_to_string (addr), (unsigned long) len,
4651 core_addr_to_string (retval));
4652 return retval;
4653 }
4654
4655 static int
4656 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
4657 struct expression *cond)
4658 {
4659 int retval;
4660
4661 retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
4662 rw, cond);
4663
4664 fprintf_unfiltered (gdb_stdlog,
4665 "target_can_accel_watchpoint_condition "
4666 "(%s, %d, %d, %s) = %ld\n",
4667 core_addr_to_string (addr), len, rw,
4668 host_address_to_string (cond), (unsigned long) retval);
4669 return retval;
4670 }
4671
4672 static int
4673 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4674 {
4675 int retval;
4676
4677 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4678
4679 fprintf_unfiltered (gdb_stdlog,
4680 "target_stopped_by_watchpoint () = %ld\n",
4681 (unsigned long) retval);
4682 return retval;
4683 }
4684
4685 static int
4686 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4687 {
4688 int retval;
4689
4690 retval = debug_target.to_stopped_data_address (target, addr);
4691
4692 fprintf_unfiltered (gdb_stdlog,
4693 "target_stopped_data_address ([%s]) = %ld\n",
4694 core_addr_to_string (*addr),
4695 (unsigned long)retval);
4696 return retval;
4697 }
4698
4699 static int
4700 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4701 CORE_ADDR addr,
4702 CORE_ADDR start, int length)
4703 {
4704 int retval;
4705
4706 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4707 start, length);
4708
4709 fprintf_filtered (gdb_stdlog,
4710 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4711 core_addr_to_string (addr), core_addr_to_string (start),
4712 length, retval);
4713 return retval;
4714 }
4715
4716 static int
4717 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
4718 struct bp_target_info *bp_tgt)
4719 {
4720 int retval;
4721
4722 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
4723
4724 fprintf_unfiltered (gdb_stdlog,
4725 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4726 core_addr_to_string (bp_tgt->placed_address),
4727 (unsigned long) retval);
4728 return retval;
4729 }
4730
4731 static int
4732 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
4733 struct bp_target_info *bp_tgt)
4734 {
4735 int retval;
4736
4737 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
4738
4739 fprintf_unfiltered (gdb_stdlog,
4740 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4741 core_addr_to_string (bp_tgt->placed_address),
4742 (unsigned long) retval);
4743 return retval;
4744 }
4745
4746 static int
4747 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
4748 struct expression *cond)
4749 {
4750 int retval;
4751
4752 retval = debug_target.to_insert_watchpoint (addr, len, type, cond);
4753
4754 fprintf_unfiltered (gdb_stdlog,
4755 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4756 core_addr_to_string (addr), len, type,
4757 host_address_to_string (cond), (unsigned long) retval);
4758 return retval;
4759 }
4760
4761 static int
4762 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
4763 struct expression *cond)
4764 {
4765 int retval;
4766
4767 retval = debug_target.to_remove_watchpoint (addr, len, type, cond);
4768
4769 fprintf_unfiltered (gdb_stdlog,
4770 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4771 core_addr_to_string (addr), len, type,
4772 host_address_to_string (cond), (unsigned long) retval);
4773 return retval;
4774 }
4775
/* Debug-logging wrapper for to_terminal_init: forward to the real
   target method, then record the call in the target debug log.  */

static void
debug_to_terminal_init (void)
{
  debug_target.to_terminal_init ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
4783
/* Debug-logging wrapper for to_terminal_inferior: forward to the real
   target method, then record the call in the target debug log.  */

static void
debug_to_terminal_inferior (void)
{
  debug_target.to_terminal_inferior ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
4791
/* Debug-logging wrapper for to_terminal_ours_for_output: forward to
   the real target method, then record the call in the debug log.  */

static void
debug_to_terminal_ours_for_output (void)
{
  debug_target.to_terminal_ours_for_output ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}
4799
/* Debug-logging wrapper for to_terminal_ours: forward to the real
   target method, then record the call in the target debug log.  */

static void
debug_to_terminal_ours (void)
{
  debug_target.to_terminal_ours ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}
4807
/* Debug-logging wrapper for to_terminal_save_ours: forward to the
   real target method, then record the call in the debug log.  */

static void
debug_to_terminal_save_ours (void)
{
  debug_target.to_terminal_save_ours ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}
4815
4816 static void
4817 debug_to_terminal_info (const char *arg, int from_tty)
4818 {
4819 debug_target.to_terminal_info (arg, from_tty);
4820
4821 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4822 from_tty);
4823 }
4824
4825 static void
4826 debug_to_load (char *args, int from_tty)
4827 {
4828 debug_target.to_load (args, from_tty);
4829
4830 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4831 }
4832
/* Debug-logging wrapper for to_post_startup_inferior: forward to the
   real target method, then record the call and the new inferior's pid
   in the target debug log.  */

static void
debug_to_post_startup_inferior (ptid_t ptid)
{
  debug_target.to_post_startup_inferior (ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      ptid_get_pid (ptid));
}
4841
4842 static int
4843 debug_to_insert_fork_catchpoint (int pid)
4844 {
4845 int retval;
4846
4847 retval = debug_target.to_insert_fork_catchpoint (pid);
4848
4849 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4850 pid, retval);
4851
4852 return retval;
4853 }
4854
4855 static int
4856 debug_to_remove_fork_catchpoint (int pid)
4857 {
4858 int retval;
4859
4860 retval = debug_target.to_remove_fork_catchpoint (pid);
4861
4862 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4863 pid, retval);
4864
4865 return retval;
4866 }
4867
4868 static int
4869 debug_to_insert_vfork_catchpoint (int pid)
4870 {
4871 int retval;
4872
4873 retval = debug_target.to_insert_vfork_catchpoint (pid);
4874
4875 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4876 pid, retval);
4877
4878 return retval;
4879 }
4880
4881 static int
4882 debug_to_remove_vfork_catchpoint (int pid)
4883 {
4884 int retval;
4885
4886 retval = debug_target.to_remove_vfork_catchpoint (pid);
4887
4888 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4889 pid, retval);
4890
4891 return retval;
4892 }
4893
4894 static int
4895 debug_to_insert_exec_catchpoint (int pid)
4896 {
4897 int retval;
4898
4899 retval = debug_target.to_insert_exec_catchpoint (pid);
4900
4901 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4902 pid, retval);
4903
4904 return retval;
4905 }
4906
4907 static int
4908 debug_to_remove_exec_catchpoint (int pid)
4909 {
4910 int retval;
4911
4912 retval = debug_target.to_remove_exec_catchpoint (pid);
4913
4914 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4915 pid, retval);
4916
4917 return retval;
4918 }
4919
4920 static int
4921 debug_to_has_exited (int pid, int wait_status, int *exit_status)
4922 {
4923 int has_exited;
4924
4925 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
4926
4927 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4928 pid, wait_status, *exit_status, has_exited);
4929
4930 return has_exited;
4931 }
4932
4933 static int
4934 debug_to_can_run (void)
4935 {
4936 int retval;
4937
4938 retval = debug_target.to_can_run ();
4939
4940 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4941
4942 return retval;
4943 }
4944
4945 static struct gdbarch *
4946 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4947 {
4948 struct gdbarch *retval;
4949
4950 retval = debug_target.to_thread_architecture (ops, ptid);
4951
4952 fprintf_unfiltered (gdb_stdlog,
4953 "target_thread_architecture (%s) = %s [%s]\n",
4954 target_pid_to_str (ptid),
4955 host_address_to_string (retval),
4956 gdbarch_bfd_arch_info (retval)->printable_name);
4957 return retval;
4958 }
4959
/* Debug-logging wrapper for to_stop: forward to the real target
   method, then record the call and the target ptid in the log.  */

static void
debug_to_stop (ptid_t ptid)
{
  debug_target.to_stop (ptid);

  fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
		      target_pid_to_str (ptid));
}
4968
/* Debug-logging wrapper for to_rcmd: forward the monitor command to
   the real target, then record the command in the debug log.  */

static void
debug_to_rcmd (char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
4976
4977 static char *
4978 debug_to_pid_to_exec_file (int pid)
4979 {
4980 char *exec_file;
4981
4982 exec_file = debug_target.to_pid_to_exec_file (pid);
4983
4984 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4985 pid, exec_file);
4986
4987 return exec_file;
4988 }
4989
/* Install the debug-logging wrappers: save a copy of the current
   target vector in debug_target, then point the interesting methods
   of current_target at the debug_to_* wrappers above, which forward
   to the saved copy and log each call.  Called when "set debug
   target" is turned on.  */

static void
setup_target_debug (void)
{
  /* Keep the real methods so the wrappers can delegate to them.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
5037 \f
5038
/* Help text shared by the "info target" and "info files" commands.  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
5043
5044 static void
5045 do_monitor_command (char *cmd,
5046 int from_tty)
5047 {
5048 if ((current_target.to_rcmd
5049 == (void (*) (char *, struct ui_file *)) tcomplain)
5050 || (current_target.to_rcmd == debug_to_rcmd
5051 && (debug_target.to_rcmd
5052 == (void (*) (char *, struct ui_file *)) tcomplain)))
5053 error (_("\"monitor\" command not supported by this target."));
5054 target_rcmd (cmd, gdb_stdtarg);
5055 }
5056
5057 /* Print the name of each layers of our target stack. */
5058
5059 static void
5060 maintenance_print_target_stack (char *cmd, int from_tty)
5061 {
5062 struct target_ops *t;
5063
5064 printf_filtered (_("The current target stack is:\n"));
5065
5066 for (t = target_stack; t != NULL; t = t->beneath)
5067 {
5068 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
5069 }
5070 }
5071
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated; the pending
   value is applied by set_target_async_command once it is safe.  */
static int target_async_permitted_1 = 0;
5078
/* "set target-async" callback: apply the user's pending value to
   target_async_permitted, unless an inferior is live, in which case
   revert the pending value and report an error.  */

static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Undo the user's edit before erroring out, so "show" keeps
	 reflecting the value actually in effect.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
5091
/* "show target-async" callback: print the current setting.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
5101
/* Temporary copies of permission settings.  The "set may-*" commands
   write here; the values are copied into the real may_* globals only
   when it is safe (see set_target_permissions below).  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
5110
/* Make the user-set values match the real values again.  Used to
   revert the *_1 temporaries after a rejected "set" command.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
5123
/* The one function handles (most of) the permission flags in the same
   way.  may_write_memory is the exception: it is handled separately
   by set_write_memory_permission so it can change independently of
   observer mode.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Revert the user's pending edits before erroring out.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
5145
/* Set memory write permission independently of observer mode.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
5156
5157
/* Module initialization: install the dummy target at the bottom of
   the target stack and register all target-related commands and
   settings ("info target", "monitor", "set debug target", the
   may-* permission switches, etc.).  */

void
initialize_targets (void)
{
  /* The dummy target sits at the bottom of the stack so there is
     always at least one target present.  */
  init_dummy_target ();
  push_target (&dummy_target);

  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* Permission switches.  Each writes to a *_1 temporary; the real
     may_* globals are updated by the set_* callbacks when safe.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}