]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/target.c
convert to_stop_recording
[thirdparty/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47 #include "auxv.h"
48
49 static void target_info (char *, int);
50
51 static void default_terminal_info (struct target_ops *, const char *, int);
52
53 static int default_watchpoint_addr_within_range (struct target_ops *,
54 CORE_ADDR, CORE_ADDR, int);
55
56 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
57 CORE_ADDR, int);
58
59 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
60
61 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
62 long lwp, long tid);
63
64 static int default_follow_fork (struct target_ops *self, int follow_child,
65 int detach_fork);
66
67 static void default_mourn_inferior (struct target_ops *self);
68
69 static int default_search_memory (struct target_ops *ops,
70 CORE_ADDR start_addr,
71 ULONGEST search_space_len,
72 const gdb_byte *pattern,
73 ULONGEST pattern_len,
74 CORE_ADDR *found_addrp);
75
76 static void tcomplain (void) ATTRIBUTE_NORETURN;
77
78 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
79
80 static int return_zero (void);
81
82 void target_ignore (void);
83
84 static void target_command (char *, int);
85
86 static struct target_ops *find_default_run_target (char *);
87
88 static target_xfer_partial_ftype default_xfer_partial;
89
90 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
91 ptid_t ptid);
92
93 static int dummy_find_memory_regions (struct target_ops *self,
94 find_memory_region_ftype ignore1,
95 void *ignore2);
96
97 static char *dummy_make_corefile_notes (struct target_ops *self,
98 bfd *ignore1, int *ignore2);
99
100 static char *default_pid_to_str (struct target_ops *ops, ptid_t ptid);
101
102 static int find_default_can_async_p (struct target_ops *ignore);
103
104 static int find_default_is_async_p (struct target_ops *ignore);
105
106 static enum exec_direction_kind default_execution_direction
107 (struct target_ops *self);
108
109 #include "target-delegates.c"
110
111 static void init_dummy_target (void);
112
113 static struct target_ops debug_target;
114
115 static void debug_to_open (char *, int);
116
117 static void debug_to_prepare_to_store (struct target_ops *self,
118 struct regcache *);
119
120 static void debug_to_files_info (struct target_ops *);
121
122 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
123 struct bp_target_info *);
124
125 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
126 struct bp_target_info *);
127
128 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
129 int, int, int);
130
131 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
132 struct gdbarch *,
133 struct bp_target_info *);
134
135 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
136 struct gdbarch *,
137 struct bp_target_info *);
138
139 static int debug_to_insert_watchpoint (struct target_ops *self,
140 CORE_ADDR, int, int,
141 struct expression *);
142
143 static int debug_to_remove_watchpoint (struct target_ops *self,
144 CORE_ADDR, int, int,
145 struct expression *);
146
147 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
148
149 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
150 CORE_ADDR, CORE_ADDR, int);
151
152 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
153 CORE_ADDR, int);
154
155 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
156 CORE_ADDR, int, int,
157 struct expression *);
158
159 static void debug_to_terminal_init (struct target_ops *self);
160
161 static void debug_to_terminal_inferior (struct target_ops *self);
162
163 static void debug_to_terminal_ours_for_output (struct target_ops *self);
164
165 static void debug_to_terminal_save_ours (struct target_ops *self);
166
167 static void debug_to_terminal_ours (struct target_ops *self);
168
169 static void debug_to_load (struct target_ops *self, char *, int);
170
171 static int debug_to_can_run (struct target_ops *self);
172
173 static void debug_to_stop (struct target_ops *self, ptid_t);
174
175 /* Pointer to array of target architecture structures; the size of the
176 array; the current index into the array; the allocated size of the
177 array. */
178 struct target_ops **target_structs;
179 unsigned target_struct_size;
180 unsigned target_struct_allocsize;
181 #define DEFAULT_ALLOCSIZE 10
182
183 /* The initial current target, so that there is always a semi-valid
184 current target. */
185
186 static struct target_ops dummy_target;
187
188 /* Top of target stack. */
189
190 static struct target_ops *target_stack;
191
192 /* The target structure we are currently using to talk to a process
193 or file or whatever "inferior" we have. */
194
195 struct target_ops current_target;
196
197 /* Command list for target. */
198
199 static struct cmd_list_element *targetlist = NULL;
200
201 /* Nonzero if we should trust readonly sections from the
202 executable when reading memory. */
203
204 static int trust_readonly = 0;
205
206 /* Nonzero if we should show true memory content including
207 memory breakpoint inserted by gdb. */
208
209 static int show_memory_breakpoints = 0;
210
211 /* These globals control whether GDB attempts to perform these
212 operations; they are useful for targets that need to prevent
213 inadvertant disruption, such as in non-stop mode. */
214
215 int may_write_registers = 1;
216
217 int may_write_memory = 1;
218
219 int may_insert_breakpoints = 1;
220
221 int may_insert_tracepoints = 1;
222
223 int may_insert_fast_tracepoints = 1;
224
225 int may_stop = 1;
226
227 /* Non-zero if we want to see trace of target level stuff. */
228
229 static unsigned int targetdebug = 0;
/* Implement "show debug target": report the current value of the
   target debug flag to FILE.  VALUE is the pre-rendered setting.  */
static void
show_targetdebug (struct ui_file *file, int from_tty,
                  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
236
237 static void setup_target_debug (void);
238
239 /* The user just typed 'target' without the name of a target. */
240
static void
target_command (char *arg, int from_tty)
{
  /* A bare "target" is not a complete command; point the user at the
     list of target subcommands instead of erroring out.  */
  fputs_filtered ("Argument required (target name).  Try `help target'\n",
                  gdb_stdout);
}
247
248 /* Default target_has_* methods for process_stratum targets. */
249
250 int
251 default_child_has_all_memory (struct target_ops *ops)
252 {
253 /* If no inferior selected, then we can't read memory here. */
254 if (ptid_equal (inferior_ptid, null_ptid))
255 return 0;
256
257 return 1;
258 }
259
260 int
261 default_child_has_memory (struct target_ops *ops)
262 {
263 /* If no inferior selected, then we can't read memory here. */
264 if (ptid_equal (inferior_ptid, null_ptid))
265 return 0;
266
267 return 1;
268 }
269
270 int
271 default_child_has_stack (struct target_ops *ops)
272 {
273 /* If no inferior selected, there's no stack. */
274 if (ptid_equal (inferior_ptid, null_ptid))
275 return 0;
276
277 return 1;
278 }
279
280 int
281 default_child_has_registers (struct target_ops *ops)
282 {
283 /* Can't read registers from no inferior. */
284 if (ptid_equal (inferior_ptid, null_ptid))
285 return 0;
286
287 return 1;
288 }
289
290 int
291 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
292 {
293 /* If there's no thread selected, then we can't make it run through
294 hoops. */
295 if (ptid_equal (the_ptid, null_ptid))
296 return 0;
297
298 return 1;
299 }
300
301
302 int
303 target_has_all_memory_1 (void)
304 {
305 struct target_ops *t;
306
307 for (t = current_target.beneath; t != NULL; t = t->beneath)
308 if (t->to_has_all_memory (t))
309 return 1;
310
311 return 0;
312 }
313
314 int
315 target_has_memory_1 (void)
316 {
317 struct target_ops *t;
318
319 for (t = current_target.beneath; t != NULL; t = t->beneath)
320 if (t->to_has_memory (t))
321 return 1;
322
323 return 0;
324 }
325
326 int
327 target_has_stack_1 (void)
328 {
329 struct target_ops *t;
330
331 for (t = current_target.beneath; t != NULL; t = t->beneath)
332 if (t->to_has_stack (t))
333 return 1;
334
335 return 0;
336 }
337
338 int
339 target_has_registers_1 (void)
340 {
341 struct target_ops *t;
342
343 for (t = current_target.beneath; t != NULL; t = t->beneath)
344 if (t->to_has_registers (t))
345 return 1;
346
347 return 0;
348 }
349
350 int
351 target_has_execution_1 (ptid_t the_ptid)
352 {
353 struct target_ops *t;
354
355 for (t = current_target.beneath; t != NULL; t = t->beneath)
356 if (t->to_has_execution (t, the_ptid))
357 return 1;
358
359 return 0;
360 }
361
/* Like target_has_execution_1, but for the currently selected
   inferior_ptid.  */
int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
367
368 /* Complete initialization of T. This ensures that various fields in
369 T are set, if needed by the target implementation. */
370
void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* NOTE(review): these casts make return_zero (an int (*)(void)) be
     called through pointers of different function types, which is
     formally undefined behavior in ISO C; it relies on the supported
     ABIs ignoring extra arguments.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Fill every remaining NULL method slot with a delegating stub
     (generated in target-delegates.c).  */
  install_delegators (t);
}
395
396 /* Add possible target architecture T to the list and add a new
397 command 'target T->to_shortname'. Set COMPLETER as the command's
398 completer if not NULL. */
399
void
add_target_with_completer (struct target_ops *t,
                           completer_ftype *completer)
{
  struct cmd_list_element *c;

  complete_target_initialization (t);

  /* Lazily allocate, then double as needed, the global array of
     registered targets.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
        (target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
        xrealloc ((char *) target_structs,
                  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* The first registration also creates the "target" prefix command
     itself.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol.  For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
                    &targetlist, "target ", 0, &cmdlist);
  c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
               &targetlist);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
436
437 /* Add a possible target architecture to the list. */
438
void
add_target (struct target_ops *t)
{
  /* Same as add_target_with_completer, with no command completion.  */
  add_target_with_completer (t, NULL);
}
444
445 /* See target.h. */
446
void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  alt = xstrprintf ("target %s", t->to_shortname);
  deprecate_cmd (c, alt);
  /* NOTE(review): ALT is never freed -- presumably deprecate_cmd keeps
     a reference to the string for the lifetime of the command; confirm
     before adding a free here.  */
}
459
460 /* Stub functions */
461
/* Deliberate no-op, used as a default for target methods that may be
   safely ignored.  */
void
target_ignore (void)
{
}
466
/* Kill the inferior process, delegating through the current target
   stack's to_kill method.  */
void
target_kill (void)
{
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");

  current_target.to_kill (&current_target);
}
475
/* Load a program image (named in ARG) into the target, via the
   current target stack's to_load method.  FROM_TTY is the usual
   interactive flag.  */
void
target_load (char *arg, int from_tty)
{
  /* Loading rewrites target memory, so cached reads would be stale.  */
  target_dcache_invalidate ();
  (*current_target.to_load) (&current_target, arg, from_tty);
}
482
/* Start a new inferior running EXEC_FILE with arguments ARGS and
   environment ENV, using the topmost target on the stack that
   implements to_create_inferior.  FROM_TTY is the interactive flag.
   It is an internal error if no target can create an inferior.  */
void
target_create_inferior (char *exec_file, char *args,
                        char **env, int from_tty)
{
  struct target_ops *t;

  /* Delegate to the first stratum that provides the method.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_create_inferior != NULL)
        {
          t->to_create_inferior (t, exec_file, args, env, from_tty);
          if (targetdebug)
            fprintf_unfiltered (gdb_stdlog,
                                "target_create_inferior (%s, %s, xxx, %d)\n",
                                exec_file, args, from_tty);
          return;
        }
    }

  internal_error (__FILE__, __LINE__,
                  _("could not find a target to create inferior"));
}
505
/* Give the inferior control of the terminal, unless GDB is resuming
   it in the background.  */
void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet.  However, if sync_execution
     is not set, we know it will become async prior to resume.  */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */
  (*current_target.to_terminal_inferior) (&current_target);
}
520
/* Default memory-transfer method: always fail with EIO.  Installed
   when no target on the stack can handle memory accesses.  The
   MEMADDR/MYADDR/LEN/WRITE/T arguments are ignored.  */
static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
          struct target_ops *t)
{
  errno = EIO;                  /* Can't read/write this location.  */
  return 0;                     /* No bytes handled.  */
}
528
/* Error out, reporting that the current target cannot perform the
   requested operation.  Does not return (error throws).  */
static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
         current_target.to_shortname);
}
535
/* Error out because the requested operation needs a live process and
   there is none.  Does not return (error throws).  */
void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
541
/* Default to_terminal_info method: there is no saved terminal state
   to describe.  ARGS and FROM_TTY are ignored.  */
static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
547
548 /* A default implementation for the to_get_ada_task_ptid target method.
549
550 This function builds the PTID by using both LWP and TID as part of
551 the PTID lwp and tid elements. The pid used is the pid of the
552 inferior_ptid. */
553
static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  /* Combine the current inferior's pid with the given LWP and TID.  */
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
559
560 static enum exec_direction_kind
561 default_execution_direction (struct target_ops *self)
562 {
563 if (!target_can_execute_reverse)
564 return EXEC_FORWARD;
565 else if (!target_can_async_p ())
566 return EXEC_FORWARD;
567 else
568 gdb_assert_not_reached ("\
569 to_execution_direction must be implemented for reverse async");
570 }
571
572 /* Go through the target stack from top to bottom, copying over zero
573 entries in current_target, then filling in still empty entries. In
574 effect, we are doing class inheritance through the pushed target
575 vectors.
576
577 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
578 is currently implemented, is that it discards any knowledge of
579 which target an inherited method originally belonged to.
580 Consequently, new new target methods should instead explicitly and
581 locally search the target stack for the target that can handle the
582 request. */
583
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* Copy FIELD from TARGET into current_target, but only if it has
     not already been filled in by a higher stratum.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
        current_target.FIELD = (TARGET)->FIELD

  /* Walk the stack top-to-bottom, so higher strata win.  */
  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      /* Do not inherit to_post_attach.  */
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      /* Do not inherit to_prepare_to_store.  */
      INHERIT (deprecated_xfer_memory, t);
      /* Do not inherit to_files_info.  */
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      /* Do not inherit to_can_use_hw_breakpoint.  */
      /* Do not inherit to_insert_hw_breakpoint.  */
      /* Do not inherit to_remove_hw_breakpoint.  */
      /* Do not inherit to_ranged_break_num_registers.  */
      /* Do not inherit to_insert_watchpoint.  */
      /* Do not inherit to_remove_watchpoint.  */
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      /* Do not inherit to_watchpoint_addr_within_range.  */
      /* Do not inherit to_region_ok_for_hw_watchpoint.  */
      /* Do not inherit to_can_accel_watchpoint_condition.  */
      /* Do not inherit to_masked_watch_num_registers.  */
      /* Do not inherit to_terminal_init.  */
      /* Do not inherit to_terminal_inferior.  */
      /* Do not inherit to_terminal_ours_for_output.  */
      /* Do not inherit to_terminal_ours.  */
      /* Do not inherit to_terminal_save_ours.  */
      /* Do not inherit to_terminal_info.  */
      /* Do not inherit to_kill.  */
      /* Do not inherit to_load.  */
      /* Do not inherit to_create_inferior.  */
      /* Do not inherit to_post_startup_inferior.  */
      /* Do not inherit to_insert_fork_catchpoint.  */
      /* Do not inherit to_remove_fork_catchpoint.  */
      /* Do not inherit to_insert_vfork_catchpoint.  */
      /* Do not inherit to_remove_vfork_catchpoint.  */
      /* Do not inherit to_follow_fork.  */
      /* Do not inherit to_insert_exec_catchpoint.  */
      /* Do not inherit to_remove_exec_catchpoint.  */
      /* Do not inherit to_set_syscall_catchpoint.  */
      /* Do not inherit to_has_exited.  */
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      /* Do not inherit to_extra_thread_info.  */
      /* Do not inherit to_thread_name.  */
      /* Do not inherit to_stop.  */
      /* Do not inherit to_xfer_partial.  */
      /* Do not inherit to_rcmd.  */
      /* Do not inherit to_pid_to_exec_file.  */
      /* Do not inherit to_log_command.  */
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      /* Do not inherit to_find_memory_regions.  */
      /* Do not inherit to_make_corefile_notes.  */
      /* Do not inherit to_get_bookmark.  */
      /* Do not inherit to_goto_bookmark.  */
      /* Do not inherit to_get_thread_local_address.  */
      /* Do not inherit to_can_execute_reverse.  */
      /* Do not inherit to_execution_direction.  */
      /* Do not inherit to_thread_architecture.  */
      /* Do not inherit to_read_description.  */
      /* Do not inherit to_get_ada_task_ptid.  */
      /* Do not inherit to_search_memory.  */
      /* Do not inherit to_supports_multi_process.  */
      /* Do not inherit to_supports_enable_disable_tracepoint.  */
      /* Do not inherit to_supports_string_tracing.  */
      /* Do not inherit to_trace_init.  */
      /* Do not inherit to_download_tracepoint.  */
      /* Do not inherit to_can_download_tracepoint.  */
      /* Do not inherit to_download_trace_state_variable.  */
      /* Do not inherit to_enable_tracepoint.  */
      /* Do not inherit to_disable_tracepoint.  */
      /* Do not inherit to_trace_set_readonly_regions.  */
      /* Do not inherit to_trace_start.  */
      /* Do not inherit to_get_trace_status.  */
      /* Do not inherit to_get_tracepoint_status.  */
      /* Do not inherit to_trace_stop.  */
      /* Do not inherit to_trace_find.  */
      /* Do not inherit to_get_trace_state_variable_value.  */
      /* Do not inherit to_save_trace_data.  */
      /* Do not inherit to_upload_tracepoints.  */
      /* Do not inherit to_upload_trace_state_variables.  */
      /* Do not inherit to_get_raw_trace_data.  */
      /* Do not inherit to_get_min_fast_tracepoint_insn_len.  */
      /* Do not inherit to_set_disconnected_tracing.  */
      /* Do not inherit to_set_circular_trace_buffer.  */
      /* Do not inherit to_set_trace_buffer_size.  */
      /* Do not inherit to_set_trace_notes.  */
      /* Do not inherit to_get_tib_address.  */
      /* Do not inherit to_set_permissions.  */
      /* Do not inherit to_static_tracepoint_marker_at.  */
      /* Do not inherit to_static_tracepoint_markers_by_strid.  */
      /* Do not inherit to_traceframe_info.  */
      /* Do not inherit to_use_agent.  */
      /* Do not inherit to_can_use_agent.  */
      /* Do not inherit to_augmented_libraries_svr4_read.  */
      INHERIT (to_magic, t);
      /* Do not inherit
         to_supports_evaluation_of_breakpoint_conditions.  */
      /* Do not inherit to_can_run_breakpoint_commands.  */
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that print an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
    if (!current_target.field)               \
      current_target.field = value

  de_fault (to_open,
            (void (*) (char *, int))
            tcomplain);
  de_fault (to_close,
            (void (*) (struct target_ops *))
            target_ignore);
  de_fault (deprecated_xfer_memory,
            (int (*) (CORE_ADDR, gdb_byte *, int, int,
                      struct mem_attrib *, struct target_ops *))
            nomemory);
  de_fault (to_can_run,
            (int (*) (struct target_ops *))
            return_zero);
  current_target.to_read_description = NULL;

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
766
767 /* Push a new target type into the stack of the existing target accessors,
768 possibly superseding some of the existing accessors.
769
770 Rather than allow an empty stack, we always have the dummy target at
771 the bottom stratum, so we can call the function vectors without
772 checking them. */
773
void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
                          "Magic number of %s target struct wrong\n",
                          t->to_shortname);
      internal_error (__FILE__, __LINE__,
                      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  CUR points at
     the link (target_stack itself, or some `beneath' field) that will
     be rewritten to splice T in.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
        break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
         and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  /* Rebuild the squashed current_target view of the stack.  */
  update_current_target ();
}
817
818 /* Remove a target_ops vector from the stack, wherever it may be.
819 Return how many times it was removed (0 or 1). */
820
int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target sits permanently at the bottom of the stack.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
                    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
        break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}
859
/* Unpush (and close) every target whose stratum is strictly above
   ABOVE_STRATUM.  */
void
pop_all_targets_above (enum strata above_stratum)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      if (!unpush_target (target_stack))
        {
          /* unpush_target only fails when the target is not on the
             stack at all, which here would mean the stack is corrupt.  */
          fprintf_unfiltered (gdb_stderr,
                              "pop_all_targets couldn't find target %s\n",
                              target_stack->to_shortname);
          internal_error (__FILE__, __LINE__,
                          _("failed internal consistency check"));
          break;
        }
    }
}
876
/* Unpush every target except the dummy target at the bottom.  */
void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
882
883 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
884
885 int
886 target_is_pushed (struct target_ops *t)
887 {
888 struct target_ops **cur;
889
890 /* Check magic number. If wrong, it probably means someone changed
891 the struct definition, but not all the places that initialize one. */
892 if (t->to_magic != OPS_MAGIC)
893 {
894 fprintf_unfiltered (gdb_stderr,
895 "Magic number of %s target struct wrong\n",
896 t->to_shortname);
897 internal_error (__FILE__, __LINE__,
898 _("failed internal consistency check"));
899 }
900
901 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
902 if (*cur == t)
903 return 1;
904
905 return 0;
906 }
907
908 /* Using the objfile specified in OBJFILE, find the address for the
909 current thread's thread-local storage with offset OFFSET. */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* ADDR is volatile because it is assigned inside TRY_CATCH and read
     after it.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first stratum that can do TLS address lookups.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
        break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
        {
          CORE_ADDR lm_addr;

          /* Fetch the load module address for this objfile.  */
          lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
                                                           objfile);
          /* If it's 0, throw the appropriate exception.  */
          if (lm_addr == 0)
            throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
                         _("TLS load module not found"));

          addr = target->to_get_thread_local_address (target, ptid,
                                                      lm_addr, offset);
        }
      /* If an error occurred, print TLS related messages here.  Otherwise,
         throw the error to some higher catcher.  */
      if (ex.reason < 0)
        {
          int objfile_is_library = (objfile->flags & OBJF_SHARED);

          switch (ex.error)
            {
            case TLS_NO_LIBRARY_SUPPORT_ERROR:
              error (_("Cannot find thread-local variables "
                       "in this thread library."));
              break;
            case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
              if (objfile_is_library)
                error (_("Cannot find shared library `%s' in dynamic"
                         " linker's load module list"), objfile_name (objfile));
              else
                error (_("Cannot find executable file `%s' in dynamic"
                         " linker's load module list"), objfile_name (objfile));
              break;
            case TLS_NOT_ALLOCATED_YET_ERROR:
              if (objfile_is_library)
                error (_("The inferior has not yet allocated storage for"
                         " thread-local variables in\n"
                         "the shared library `%s'\n"
                         "for %s"),
                       objfile_name (objfile), target_pid_to_str (ptid));
              else
                error (_("The inferior has not yet allocated storage for"
                         " thread-local variables in\n"
                         "the executable `%s'\n"
                         "for %s"),
                       objfile_name (objfile), target_pid_to_str (ptid));
              break;
            case TLS_GENERIC_ERROR:
              if (objfile_is_library)
                error (_("Cannot find thread-local storage for %s, "
                         "shared library %s:\n%s"),
                       target_pid_to_str (ptid),
                       objfile_name (objfile), ex.message);
              else
                error (_("Cannot find thread-local storage for %s, "
                         "executable file %s:\n%s"),
                       target_pid_to_str (ptid),
                       objfile_name (objfile), ex.message);
              break;
            default:
              /* Not a TLS-specific error; re-throw for an outer
                 handler.  */
              throw_exception (ex);
              break;
            }
        }
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1004
1005 const char *
1006 target_xfer_status_to_string (enum target_xfer_status err)
1007 {
1008 #define CASE(X) case X: return #X
1009 switch (err)
1010 {
1011 CASE(TARGET_XFER_E_IO);
1012 CASE(TARGET_XFER_E_UNAVAILABLE);
1013 default:
1014 return "<unknown>";
1015 }
1016 #undef CASE
1017 };
1018
1019
1020 #undef MIN
1021 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1022
1023 /* target_read_string -- read a null terminated string, up to LEN bytes,
1024 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1025 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1026 is responsible for freeing it. Return the number of bytes successfully
1027 read. */
1028
int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];              /* One aligned word of target memory.  */
  int errcode = 0;
  char *buffer;                 /* Growable result buffer (heap).  */
  int buffer_allocated;
  char *bufptr;                 /* Next free byte in BUFFER.  */
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read up to the next 4-byte alignment boundary, one aligned
         word at a time.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
        {
          /* The transfer request might have crossed the boundary to an
             unallocated region of memory.  Retry the transfer, requesting
             a single byte.  */
          tlen = 1;
          offset = 0;
          errcode = target_read_memory (memaddr, buf, 1);
          if (errcode != 0)
            goto done;
        }

      /* Double the result buffer if this word would overflow it.  */
      if (bufptr - buffer + tlen > buffer_allocated)
        {
          unsigned int bytes;

          bytes = bufptr - buffer;
          buffer_allocated *= 2;
          buffer = xrealloc (buffer, buffer_allocated);
          bufptr = buffer + bytes;
        }

      /* Copy this word into the result, stopping at a NUL byte.  */
      for (i = 0; i < tlen; i++)
        {
          *bufptr++ = buf[i + offset];
          if (buf[i + offset] == '\000')
            {
              nbytes_read += i + 1;
              goto done;
            }
        }

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
 done:
  /* Ownership of BUFFER passes to the caller, even on error.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1095
1096 struct target_section_table *
1097 target_get_section_table (struct target_ops *target)
1098 {
1099 if (targetdebug)
1100 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1101
1102 return (*target->to_get_section_table) (target);
1103 }
1104
1105 /* Find a section containing ADDR. */
1106
1107 struct target_section *
1108 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1109 {
1110 struct target_section_table *table = target_get_section_table (target);
1111 struct target_section *secp;
1112
1113 if (table == NULL)
1114 return NULL;
1115
1116 for (secp = table->sections; secp < table->sections_end; secp++)
1117 {
1118 if (addr >= secp->addr && addr < secp->endaddr)
1119 return secp;
1120 }
1121 return NULL;
1122 }
1123
1124 /* Read memory from the live target, even if currently inspecting a
1125 traceframe. The return is the same as that of target_read. */
1126
1127 static enum target_xfer_status
1128 target_read_live_memory (enum target_object object,
1129 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1130 ULONGEST *xfered_len)
1131 {
1132 enum target_xfer_status ret;
1133 struct cleanup *cleanup;
1134
1135 /* Switch momentarily out of tfind mode so to access live memory.
1136 Note that this must not clear global state, such as the frame
1137 cache, which must still remain valid for the previous traceframe.
1138 We may be _building_ the frame cache at this point. */
1139 cleanup = make_cleanup_restore_traceframe_number ();
1140 set_traceframe_number (-1);
1141
1142 ret = target_xfer_partial (current_target.beneath, object, NULL,
1143 myaddr, NULL, memaddr, len, xfered_len);
1144
1145 do_cleanups (cleanup);
1146 return ret;
1147 }
1148
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed if MEMADDR lies in a section whose BFD flags mark it
     read-only; otherwise report end-of-file below.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      /* Walk all sections looking for the one that contains the start
	 of the transfer; clip the request if it runs past the end of
	 that section.  */
      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  return TARGET_XFER_EOF;
}
1204
1205 /* Read memory from more than one valid target. A core file, for
1206 instance, could have some of memory but delegate other bits to
1207 the target below it. So, we must manually try all targets. */
1208
1209 static enum target_xfer_status
1210 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1211 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1212 ULONGEST *xfered_len)
1213 {
1214 enum target_xfer_status res;
1215
1216 do
1217 {
1218 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1219 readbuf, writebuf, memaddr, len,
1220 xfered_len);
1221 if (res == TARGET_XFER_OK)
1222 break;
1223
1224 /* Stop if the target reports that the memory is not available. */
1225 if (res == TARGET_XFER_E_UNAVAILABLE)
1226 break;
1227
1228 /* We want to continue past core files to executables, but not
1229 past a running target's memory. */
1230 if (ops->to_has_all_memory (ops))
1231 break;
1232
1233 ops = ops->beneath;
1234 }
1235 while (ops != NULL);
1236
1237 return res;
1238 }
1239
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   The transfer is attempted through several mechanisms, in order:
   unmapped overlay sections read from file, trusted read-only
   sections read from file, live read-only memory when inspecting a
   traceframe, GDB's data cache, and finally the raw target stack.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  int reg_len;			/* LEN clipped to the containing mem region.  */
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  /* If the start of the request isn't covered by the
	     traceframe, fall back to live read-only memory (possibly
	     clipping LEN to stop at the first available range).  */
	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1450
1451 /* Perform a partial memory transfer. For docs see target.h,
1452 to_xfer_partial. */
1453
1454 static enum target_xfer_status
1455 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1456 gdb_byte *readbuf, const gdb_byte *writebuf,
1457 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1458 {
1459 enum target_xfer_status res;
1460
1461 /* Zero length requests are ok and require no work. */
1462 if (len == 0)
1463 return TARGET_XFER_EOF;
1464
1465 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1466 breakpoint insns, thus hiding out from higher layers whether
1467 there are software breakpoints inserted in the code stream. */
1468 if (readbuf != NULL)
1469 {
1470 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1471 xfered_len);
1472
1473 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1474 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1475 }
1476 else
1477 {
1478 void *buf;
1479 struct cleanup *old_chain;
1480
1481 /* A large write request is likely to be partially satisfied
1482 by memory_xfer_partial_1. We will continually malloc
1483 and free a copy of the entire write request for breakpoint
1484 shadow handling even though we only end up writing a small
1485 subset of it. Cap writes to 4KB to mitigate this. */
1486 len = min (4096, len);
1487
1488 buf = xmalloc (len);
1489 old_chain = make_cleanup (xfree, buf);
1490 memcpy (buf, writebuf, len);
1491
1492 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1493 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1494 xfered_len);
1495
1496 do_cleanups (old_chain);
1497 }
1498
1499 return res;
1500 }
1501
1502 static void
1503 restore_show_memory_breakpoints (void *arg)
1504 {
1505 show_memory_breakpoints = (uintptr_t) arg;
1506 }
1507
1508 struct cleanup *
1509 make_show_memory_breakpoints_cleanup (int show)
1510 {
1511 int current = show_memory_breakpoints;
1512
1513 show_memory_breakpoints = show;
1514 return make_cleanup (restore_show_memory_breakpoints,
1515 (void *) (uintptr_t) current);
1516 }
1517
/* For docs see target.h, to_xfer_partial.

   Dispatches memory objects to the memory-specific code, raw memory
   straight down the target stack, and everything else to OPS's own
   to_xfer_partial; optionally dumps the transfer when target
   debugging is on.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Respect the global "may-write-memory" setting.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  /* Hex-dump the transferred bytes, 16 per line; unless
	     "set debug target" is 2 or higher, stop after the first
	     line.  */
	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1610
1611 /* Read LEN bytes of target memory at address MEMADDR, placing the
1612 results in GDB's memory at MYADDR. Returns either 0 for success or
1613 TARGET_XFER_E_IO if any error occurs.
1614
1615 If an error occurs, no guarantee is made about the contents of the data at
1616 MYADDR. In particular, the caller should not depend upon partial reads
1617 filling the buffer with good data. There is no way for the caller to know
1618 how much good data might have been transfered anyway. Callers that can
1619 deal with partial reads should call target_read (which will retry until
1620 it makes no progress, and then return how much was transferred). */
1621
1622 int
1623 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1624 {
1625 /* Dispatch to the topmost target, not the flattened current_target.
1626 Memory accesses check target->to_has_(all_)memory, and the
1627 flattened target doesn't inherit those. */
1628 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1629 myaddr, memaddr, len) == len)
1630 return 0;
1631 else
1632 return TARGET_XFER_E_IO;
1633 }
1634
1635 /* Like target_read_memory, but specify explicitly that this is a read
1636 from the target's raw memory. That is, this read bypasses the
1637 dcache, breakpoint shadowing, etc. */
1638
1639 int
1640 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1641 {
1642 /* See comment in target_read_memory about why the request starts at
1643 current_target.beneath. */
1644 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1645 myaddr, memaddr, len) == len)
1646 return 0;
1647 else
1648 return TARGET_XFER_E_IO;
1649 }
1650
1651 /* Like target_read_memory, but specify explicitly that this is a read from
1652 the target's stack. This may trigger different cache behavior. */
1653
1654 int
1655 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1656 {
1657 /* See comment in target_read_memory about why the request starts at
1658 current_target.beneath. */
1659 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1660 myaddr, memaddr, len) == len)
1661 return 0;
1662 else
1663 return TARGET_XFER_E_IO;
1664 }
1665
1666 /* Like target_read_memory, but specify explicitly that this is a read from
1667 the target's code. This may trigger different cache behavior. */
1668
1669 int
1670 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1671 {
1672 /* See comment in target_read_memory about why the request starts at
1673 current_target.beneath. */
1674 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1675 myaddr, memaddr, len) == len)
1676 return 0;
1677 else
1678 return TARGET_XFER_E_IO;
1679 }
1680
1681 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1682 Returns either 0 for success or TARGET_XFER_E_IO if any
1683 error occurs. If an error occurs, no guarantee is made about how
1684 much data got written. Callers that can deal with partial writes
1685 should call target_write. */
1686
1687 int
1688 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1689 {
1690 /* See comment in target_read_memory about why the request starts at
1691 current_target.beneath. */
1692 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1693 myaddr, memaddr, len) == len)
1694 return 0;
1695 else
1696 return TARGET_XFER_E_IO;
1697 }
1698
1699 /* Write LEN bytes from MYADDR to target raw memory at address
1700 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1701 if any error occurs. If an error occurs, no guarantee is made
1702 about how much data got written. Callers that can deal with
1703 partial writes should call target_write. */
1704
1705 int
1706 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1707 {
1708 /* See comment in target_read_memory about why the request starts at
1709 current_target.beneath. */
1710 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1711 myaddr, memaddr, len) == len)
1712 return 0;
1713 else
1714 return TARGET_XFER_E_IO;
1715 }
1716
1717 /* Fetch the target's memory map. */
1718
1719 VEC(mem_region_s) *
1720 target_memory_map (void)
1721 {
1722 VEC(mem_region_s) *result;
1723 struct mem_region *last_one, *this_one;
1724 int ix;
1725 struct target_ops *t;
1726
1727 if (targetdebug)
1728 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1729
1730 result = current_target.to_memory_map (&current_target);
1731 if (result == NULL)
1732 return NULL;
1733
1734 qsort (VEC_address (mem_region_s, result),
1735 VEC_length (mem_region_s, result),
1736 sizeof (struct mem_region), mem_region_cmp);
1737
1738 /* Check that regions do not overlap. Simultaneously assign
1739 a numbering for the "mem" commands to use to refer to
1740 each region. */
1741 last_one = NULL;
1742 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1743 {
1744 this_one->number = ix;
1745
1746 if (last_one && last_one->hi > this_one->lo)
1747 {
1748 warning (_("Overlapping regions in memory map: ignoring"));
1749 VEC_free (mem_region_s, result);
1750 return NULL;
1751 }
1752 last_one = this_one;
1753 }
1754
1755 return result;
1756 }
1757
1758 void
1759 target_flash_erase (ULONGEST address, LONGEST length)
1760 {
1761 if (targetdebug)
1762 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1763 hex_string (address), phex (length, 0));
1764 current_target.to_flash_erase (&current_target, address, length);
1765 }
1766
1767 void
1768 target_flash_done (void)
1769 {
1770 if (targetdebug)
1771 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1772 current_target.to_flash_done (&current_target);
1773 }
1774
/* "show trust-readonly-sections" callback: report the current mode.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1783
1784 /* More generic transfers. */
1785
1786 static enum target_xfer_status
1787 default_xfer_partial (struct target_ops *ops, enum target_object object,
1788 const char *annex, gdb_byte *readbuf,
1789 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1790 ULONGEST *xfered_len)
1791 {
1792 if (object == TARGET_OBJECT_MEMORY
1793 && ops->deprecated_xfer_memory != NULL)
1794 /* If available, fall back to the target's
1795 "deprecated_xfer_memory" method. */
1796 {
1797 int xfered = -1;
1798
1799 errno = 0;
1800 if (writebuf != NULL)
1801 {
1802 void *buffer = xmalloc (len);
1803 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1804
1805 memcpy (buffer, writebuf, len);
1806 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1807 1/*write*/, NULL, ops);
1808 do_cleanups (cleanup);
1809 }
1810 if (readbuf != NULL)
1811 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1812 0/*read*/, NULL, ops);
1813 if (xfered > 0)
1814 {
1815 *xfered_len = (ULONGEST) xfered;
1816 return TARGET_XFER_E_IO;
1817 }
1818 else if (xfered == 0 && errno == 0)
1819 /* "deprecated_xfer_memory" uses 0, cross checked against
1820 ERRNO as one indication of an error. */
1821 return TARGET_XFER_EOF;
1822 else
1823 return TARGET_XFER_E_IO;
1824 }
1825 else
1826 {
1827 gdb_assert (ops->beneath != NULL);
1828 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1829 readbuf, writebuf, offset, len,
1830 xfered_len);
1831 }
1832 }
1833
1834 /* Target vector read/write partial wrapper functions. */
1835
1836 static enum target_xfer_status
1837 target_read_partial (struct target_ops *ops,
1838 enum target_object object,
1839 const char *annex, gdb_byte *buf,
1840 ULONGEST offset, ULONGEST len,
1841 ULONGEST *xfered_len)
1842 {
1843 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1844 xfered_len);
1845 }
1846
1847 static enum target_xfer_status
1848 target_write_partial (struct target_ops *ops,
1849 enum target_object object,
1850 const char *annex, const gdb_byte *buf,
1851 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1852 {
1853 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1854 xfered_len);
1855 }
1856
1857 /* Wrappers to perform the full transfer. */
1858
1859 /* For docs on target_read see target.h. */
1860
1861 LONGEST
1862 target_read (struct target_ops *ops,
1863 enum target_object object,
1864 const char *annex, gdb_byte *buf,
1865 ULONGEST offset, LONGEST len)
1866 {
1867 LONGEST xfered = 0;
1868
1869 while (xfered < len)
1870 {
1871 ULONGEST xfered_len;
1872 enum target_xfer_status status;
1873
1874 status = target_read_partial (ops, object, annex,
1875 (gdb_byte *) buf + xfered,
1876 offset + xfered, len - xfered,
1877 &xfered_len);
1878
1879 /* Call an observer, notifying them of the xfer progress? */
1880 if (status == TARGET_XFER_EOF)
1881 return xfered;
1882 else if (status == TARGET_XFER_OK)
1883 {
1884 xfered += xfered_len;
1885 QUIT;
1886 }
1887 else
1888 return -1;
1889
1890 }
1891 return len;
1892 }
1893
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning, or end, nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  Explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */

static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;			/* Nonzero: readable part is at the front.  */
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      xfree (buf);
      return;
    }

  /* Binary-search for the boundary between the readable and
     unreadable part.

     Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* "First half" is the half adjacent to the already-read byte.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the next
	     iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  BUF is handed
	 off to the result; the caller frees it via the vector.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy just that
	 tail out of BUF and release the scratch buffer.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2028
2029 void
2030 free_memory_read_result_vector (void *x)
2031 {
2032 VEC(memory_read_result_s) *v = x;
2033 memory_read_result_s *current;
2034 int ix;
2035
2036 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2037 {
2038 xfree (current->data);
2039 }
2040 VEC_free (memory_read_result_s, v);
2041 }
2042
/* Read LEN bytes of OPS's memory starting at OFFSET, skipping
   unreadable pieces.  Returns a vector of the readable blocks that
   were obtained; the caller owns the vector and each block's data
   (see free_memory_read_result_vector).  */

VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      /* region->hi == 0 means the region has no upper bound.  */
      if (region->hi == 0)
	rlen = len - xfered;
      else
	rlen = region->hi - offset;
      /* NOTE(review): RLEN is computed from OFFSET rather than
	 OFFSET + XFERED, so after the first region it can overshoot
	 the current region's end -- confirm whether this is
	 intentional or relies on later clipping.  */

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
	{
	  /* Cannot read this region.  Note that we can end up here only
	     if the region is explicitly marked inaccessible, or
	     'inaccessible-by-default' is in effect.  */
	  xfered += rlen;
	}
      else
	{
	  LONGEST to_read = min (len - xfered, rlen);
	  gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

	  LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
				      (gdb_byte *) buffer,
				      offset + xfered, to_read);
	  /* Call an observer, notifying them of the xfer progress?  */
	  if (xfer <= 0)
	    {
	      /* Got an error reading full chunk.  See if maybe we can read
		 some subrange.  */
	      xfree (buffer);
	      read_whatever_is_readable (ops, offset + xfered,
					 offset + xfered + to_read, &result);
	      xfered += to_read;
	    }
	  else
	    {
	      /* Record the block that was read; BUFFER's ownership
		 moves into the result vector.  */
	      struct memory_read_result r;
	      r.data = buffer;
	      r.begin = offset + xfered;
	      r.end = r.begin + xfer;
	      VEC_safe_push (memory_read_result_s, result, &r);
	      xfered += xfer;
	    }
	  QUIT;
	}
    }
  return result;
}
2101
2102
2103 /* An alternative to target_write with progress callbacks. */
2104
2105 LONGEST
2106 target_write_with_progress (struct target_ops *ops,
2107 enum target_object object,
2108 const char *annex, const gdb_byte *buf,
2109 ULONGEST offset, LONGEST len,
2110 void (*progress) (ULONGEST, void *), void *baton)
2111 {
2112 LONGEST xfered = 0;
2113
2114 /* Give the progress callback a chance to set up. */
2115 if (progress)
2116 (*progress) (0, baton);
2117
2118 while (xfered < len)
2119 {
2120 ULONGEST xfered_len;
2121 enum target_xfer_status status;
2122
2123 status = target_write_partial (ops, object, annex,
2124 (gdb_byte *) buf + xfered,
2125 offset + xfered, len - xfered,
2126 &xfered_len);
2127
2128 if (status == TARGET_XFER_EOF)
2129 return xfered;
2130 if (TARGET_XFER_STATUS_ERROR_P (status))
2131 return -1;
2132
2133 gdb_assert (status == TARGET_XFER_OK);
2134 if (progress)
2135 (*progress) (xfered_len, baton);
2136
2137 xfered += xfered_len;
2138 QUIT;
2139 }
2140 return len;
2141 }
2142
2143 /* For docs on target_write see target.h. */
2144
2145 LONGEST
2146 target_write (struct target_ops *ops,
2147 enum target_object object,
2148 const char *annex, const gdb_byte *buf,
2149 ULONGEST offset, LONGEST len)
2150 {
2151 return target_write_with_progress (ops, object, annex, buf, offset, len,
2152 NULL, NULL);
2153 }
2154
2155 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2156 the size of the transferred data. PADDING additional bytes are
2157 available in *BUF_P. This is a helper function for
2158 target_read_alloc; see the declaration of that function for more
2159 information. */
2160
static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;

  /* This function does not have a length parameter; it reads the
     entire OBJECT).  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      /* PADDING bytes are reserved at the end of the buffer so the
	 caller (e.g. target_read_stralloc) can append a terminator.  */
      status = target_read_partial (ops, object, annex, &buf[buf_pos],
				    buf_pos, buf_alloc - buf_pos - padding,
				    &xfered_len);

      if (status == TARGET_XFER_EOF)
	{
	  /* Read all there was.  Note that when nothing was read,
	     BUF is freed and *BUF_P is left unset.  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}
      else if (status != TARGET_XFER_OK)
	{
	  /* An error occurred.  */
	  xfree (buf);
	  return TARGET_XFER_E_IO;
	}

      buf_pos += xfered_len;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
2217
2218 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2219 the size of the transferred data. See the declaration in "target.h"
2220 function for more information about the return value. */
2221
2222 LONGEST
2223 target_read_alloc (struct target_ops *ops, enum target_object object,
2224 const char *annex, gdb_byte **buf_p)
2225 {
2226 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2227 }
2228
2229 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2230 returned as a string, allocated using xmalloc. If an error occurs
2231 or the transfer is unsupported, NULL is returned. Empty objects
2232 are returned as allocated but empty strings. A warning is issued
2233 if the result contains any embedded NUL bytes. */
2234
char *
target_read_stralloc (struct target_ops *ops, enum target_object object,
		      const char *annex)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Ask for one byte of padding so a NUL terminator can be appended
     below without reallocating.  */
  transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  /* On a zero-length transfer the worker already freed BUFFER, so a
     fresh empty string is returned instead.  */
  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target object %d, annex %s, "
		   "contained unexpected null characters"),
		 (int) object, annex ? annex : "(none)");
	break;
      }

  return bufstr;
}
2266
2267 /* Memory transfer methods. */
2268
2269 void
2270 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2271 LONGEST len)
2272 {
2273 /* This method is used to read from an alternate, non-current
2274 target. This read must bypass the overlay support (as symbols
2275 don't match this target), and GDB's internal cache (wrong cache
2276 for this target). */
2277 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2278 != len)
2279 memory_error (TARGET_XFER_E_IO, addr);
2280 }
2281
2282 ULONGEST
2283 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2284 int len, enum bfd_endian byte_order)
2285 {
2286 gdb_byte buf[sizeof (ULONGEST)];
2287
2288 gdb_assert (len <= sizeof (buf));
2289 get_target_memory (ops, addr, buf, len);
2290 return extract_unsigned_integer (buf, len, byte_order);
2291 }
2292
2293 /* See target.h. */
2294
2295 int
2296 target_insert_breakpoint (struct gdbarch *gdbarch,
2297 struct bp_target_info *bp_tgt)
2298 {
2299 if (!may_insert_breakpoints)
2300 {
2301 warning (_("May not insert breakpoints"));
2302 return 1;
2303 }
2304
2305 return current_target.to_insert_breakpoint (&current_target,
2306 gdbarch, bp_tgt);
2307 }
2308
2309 /* See target.h. */
2310
2311 int
2312 target_remove_breakpoint (struct gdbarch *gdbarch,
2313 struct bp_target_info *bp_tgt)
2314 {
2315 /* This is kind of a weird case to handle, but the permission might
2316 have been changed after breakpoints were inserted - in which case
2317 we should just take the user literally and assume that any
2318 breakpoints should be left in place. */
2319 if (!may_insert_breakpoints)
2320 {
2321 warning (_("May not remove breakpoints"));
2322 return 1;
2323 }
2324
2325 return current_target.to_remove_breakpoint (&current_target,
2326 gdbarch, bp_tgt);
2327 }
2328
2329 static void
2330 target_info (char *args, int from_tty)
2331 {
2332 struct target_ops *t;
2333 int has_all_mem = 0;
2334
2335 if (symfile_objfile != NULL)
2336 printf_unfiltered (_("Symbols from \"%s\".\n"),
2337 objfile_name (symfile_objfile));
2338
2339 for (t = target_stack; t != NULL; t = t->beneath)
2340 {
2341 if (!(*t->to_has_memory) (t))
2342 continue;
2343
2344 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2345 continue;
2346 if (has_all_mem)
2347 printf_unfiltered (_("\tWhile running this, "
2348 "GDB does not access memory from...\n"));
2349 printf_unfiltered ("%s:\n", t->to_longname);
2350 (t->to_files_info) (t);
2351 has_all_mem = (*t->to_has_all_memory) (t);
2352 }
2353 }
2354
2355 /* This function is called before any new inferior is created, e.g.
2356 by running a program, attaching, or connecting to a target.
2357 It cleans up any state from previous invocations which might
2358 change between runs. This is a subset of what target_preopen
2359 resets (things which might change between targets). */
2360
void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  */
  if (!gdbarch_has_global_solist (target_gdbarch ()))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* Force the agent-capability query to be redone for the new
     inferior.  */
  agent_capability_invalidate ();
}
2395
2396 /* Callback for iterate_over_inferiors. Gets rid of the given
2397 inferior. */
2398
2399 static int
2400 dispose_inferior (struct inferior *inf, void *args)
2401 {
2402 struct thread_info *thread;
2403
2404 thread = any_thread_of_process (inf->pid);
2405 if (thread)
2406 {
2407 switch_to_thread (thread->ptid);
2408
2409 /* Core inferiors actually should be detached, not killed. */
2410 if (target_has_execution)
2411 target_kill ();
2412 else
2413 target_detach (NULL, 0);
2414 }
2415
2416 return 0;
2417 }
2418
2419 /* This is to be called by the open routine before it does
2420 anything. */
2421
2422 void
2423 target_preopen (int from_tty)
2424 {
2425 dont_repeat ();
2426
2427 if (have_inferiors ())
2428 {
2429 if (!from_tty
2430 || !have_live_inferiors ()
2431 || query (_("A program is being debugged already. Kill it? ")))
2432 iterate_over_inferiors (dispose_inferior, NULL);
2433 else
2434 error (_("Program not killed."));
2435 }
2436
2437 /* Calling target_kill may remove the target from the stack. But if
2438 it doesn't (which seems like a win for UDI), remove it now. */
2439 /* Leave the exec target, though. The user may be switching from a
2440 live process to a core of the same program. */
2441 pop_all_targets_above (file_stratum);
2442
2443 target_pre_inferior (from_tty);
2444 }
2445
2446 /* Detach a target after doing deferred register stores. */
2447
2448 void
2449 target_detach (const char *args, int from_tty)
2450 {
2451 struct target_ops* t;
2452
2453 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2454 /* Don't remove global breakpoints here. They're removed on
2455 disconnection from the target. */
2456 ;
2457 else
2458 /* If we're in breakpoints-always-inserted mode, have to remove
2459 them before detaching. */
2460 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2461
2462 prepare_for_detach ();
2463
2464 current_target.to_detach (&current_target, args, from_tty);
2465 if (targetdebug)
2466 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2467 args, from_tty);
2468 }
2469
2470 void
2471 target_disconnect (char *args, int from_tty)
2472 {
2473 struct target_ops *t;
2474
2475 /* If we're in breakpoints-always-inserted mode or if breakpoints
2476 are global across processes, we have to remove them before
2477 disconnecting. */
2478 remove_breakpoints ();
2479
2480 for (t = current_target.beneath; t != NULL; t = t->beneath)
2481 if (t->to_disconnect != NULL)
2482 {
2483 if (targetdebug)
2484 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2485 args, from_tty);
2486 t->to_disconnect (t, args, from_tty);
2487 return;
2488 }
2489
2490 tcomplain ();
2491 }
2492
2493 ptid_t
2494 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2495 {
2496 struct target_ops *t;
2497 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2498 status, options);
2499
2500 if (targetdebug)
2501 {
2502 char *status_string;
2503 char *options_string;
2504
2505 status_string = target_waitstatus_to_string (status);
2506 options_string = target_options_to_string (options);
2507 fprintf_unfiltered (gdb_stdlog,
2508 "target_wait (%d, status, options={%s})"
2509 " = %d, %s\n",
2510 ptid_get_pid (ptid), options_string,
2511 ptid_get_pid (retval), status_string);
2512 xfree (status_string);
2513 xfree (options_string);
2514 }
2515
2516 return retval;
2517 }
2518
2519 char *
2520 target_pid_to_str (ptid_t ptid)
2521 {
2522 return (*current_target.to_pid_to_str) (&current_target, ptid);
2523 }
2524
2525 char *
2526 target_thread_name (struct thread_info *info)
2527 {
2528 return current_target.to_thread_name (&current_target, info);
2529 }
2530
2531 void
2532 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2533 {
2534 struct target_ops *t;
2535
2536 target_dcache_invalidate ();
2537
2538 current_target.to_resume (&current_target, ptid, step, signal);
2539 if (targetdebug)
2540 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2541 ptid_get_pid (ptid),
2542 step ? "step" : "continue",
2543 gdb_signal_to_name (signal));
2544
2545 registers_changed_ptid (ptid);
2546 set_executing (ptid, 1);
2547 set_running (ptid, 1);
2548 clear_inline_frame_state (ptid);
2549 }
2550
2551 void
2552 target_pass_signals (int numsigs, unsigned char *pass_signals)
2553 {
2554 if (targetdebug)
2555 {
2556 int i;
2557
2558 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2559 numsigs);
2560
2561 for (i = 0; i < numsigs; i++)
2562 if (pass_signals[i])
2563 fprintf_unfiltered (gdb_stdlog, " %s",
2564 gdb_signal_to_name (i));
2565
2566 fprintf_unfiltered (gdb_stdlog, " })\n");
2567 }
2568
2569 (*current_target.to_pass_signals) (&current_target, numsigs, pass_signals);
2570 }
2571
2572 void
2573 target_program_signals (int numsigs, unsigned char *program_signals)
2574 {
2575 if (targetdebug)
2576 {
2577 int i;
2578
2579 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2580 numsigs);
2581
2582 for (i = 0; i < numsigs; i++)
2583 if (program_signals[i])
2584 fprintf_unfiltered (gdb_stdlog, " %s",
2585 gdb_signal_to_name (i));
2586
2587 fprintf_unfiltered (gdb_stdlog, " })\n");
2588 }
2589
2590 (*current_target.to_program_signals) (&current_target,
2591 numsigs, program_signals);
2592 }
2593
/* Fallback for the to_follow_fork target method.  FOLLOW_CHILD and
   DETACH_FORK are ignored: reaching this function at all is an
   internal logic error, since only targets that report fork events
   should ever be asked to follow one.  Does not return.  */

static int
default_follow_fork (struct target_ops *self, int follow_child,
		     int detach_fork)
{
  /* Some target returned a fork event, but did not know how to follow it.  */
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to follow fork"));
}
2602
2603 /* Look through the list of possible targets for a target that can
2604 follow forks. */
2605
2606 int
2607 target_follow_fork (int follow_child, int detach_fork)
2608 {
2609 int retval = current_target.to_follow_fork (&current_target,
2610 follow_child, detach_fork);
2611
2612 if (targetdebug)
2613 fprintf_unfiltered (gdb_stdlog,
2614 "target_follow_fork (%d, %d) = %d\n",
2615 follow_child, detach_fork, retval);
2616 return retval;
2617 }
2618
/* Fallback for the to_mourn_inferior target method.  Reaching this
   function is an internal logic error: any target that reports an
   inferior exit should know how to mourn it.  Does not return.  */

static void
default_mourn_inferior (struct target_ops *self)
{
  /* The message previously read "follow mourn inferior", a copy-paste
     slip from default_follow_fork.  */
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to mourn inferior"));
}
2625
2626 void
2627 target_mourn_inferior (void)
2628 {
2629 current_target.to_mourn_inferior (&current_target);
2630 if (targetdebug)
2631 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2632
2633 /* We no longer need to keep handles on any of the object files.
2634 Make sure to release them to avoid unnecessarily locking any
2635 of them while we're not actually debugging. */
2636 bfd_cache_close_all ();
2637 }
2638
2639 /* Look for a target which can describe architectural features, starting
2640 from TARGET. If we find one, return its description. */
2641
2642 const struct target_desc *
2643 target_read_description (struct target_ops *target)
2644 {
2645 struct target_ops *t;
2646
2647 for (t = target; t != NULL; t = t->beneath)
2648 if (t->to_read_description != NULL)
2649 {
2650 const struct target_desc *tdesc;
2651
2652 tdesc = t->to_read_description (t);
2653 if (tdesc)
2654 return tdesc;
2655 }
2656
2657 return NULL;
2658 }
2659
2660 /* This implements a basic search of memory, reading target memory and
2661 performing the search here (as opposed to performing the search in on the
2662 target side with, for example, gdbserver). */
2663
/* Search SEARCH_SPACE_LEN bytes of target memory starting at
   START_ADDR for PATTERN (PATTERN_LEN bytes), reading memory in
   chunks and scanning on the host side.  Returns 1 and stores the
   match address in *FOUND_ADDRP on success, 0 when not found, -1 on a
   memory-read error.  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* The extra PATTERN_LEN - 1 bytes let a match straddle two chunks
     without ever re-reading target memory.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  /* Plain malloc (not xmalloc) so a huge request can be reported as a
     user-level error rather than aborting GDB.  */
  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      /* NOTE(review): memmem is a GNU extension; gnulib is presumed
	 to provide it on other hosts — verify against the build
	 setup.  */
      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2766
2767 /* Default implementation of memory-searching. */
2768
/* Default to_search_memory method: perform the search on the host by
   reading target memory.  SELF is unused; the search deliberately
   restarts from the top of the global target stack so every layer's
   memory view is consulted.  */

static int
default_search_memory (struct target_ops *self,
		       CORE_ADDR start_addr, ULONGEST search_space_len,
		       const gdb_byte *pattern, ULONGEST pattern_len,
		       CORE_ADDR *found_addrp)
{
  /* Start over from the top of the target stack.  */
  return simple_search_memory (current_target.beneath,
			       start_addr, search_space_len,
			       pattern, pattern_len, found_addrp);
}
2780
2781 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2782 sequence of bytes in PATTERN with length PATTERN_LEN.
2783
2784 The result is 1 if found, 0 if not found, and -1 if there was an error
2785 requiring halting of the search (e.g. memory read error).
2786 If the pattern is found the address is recorded in FOUND_ADDRP. */
2787
2788 int
2789 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2790 const gdb_byte *pattern, ULONGEST pattern_len,
2791 CORE_ADDR *found_addrp)
2792 {
2793 int found;
2794
2795 if (targetdebug)
2796 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2797 hex_string (start_addr));
2798
2799 found = current_target.to_search_memory (&current_target, start_addr,
2800 search_space_len,
2801 pattern, pattern_len, found_addrp);
2802
2803 if (targetdebug)
2804 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2805
2806 return found;
2807 }
2808
2809 /* Look through the currently pushed targets. If none of them will
2810 be able to restart the currently running process, issue an error
2811 message. */
2812
2813 void
2814 target_require_runnable (void)
2815 {
2816 struct target_ops *t;
2817
2818 for (t = target_stack; t != NULL; t = t->beneath)
2819 {
2820 /* If this target knows how to create a new program, then
2821 assume we will still be able to after killing the current
2822 one. Either killing and mourning will not pop T, or else
2823 find_default_run_target will find it again. */
2824 if (t->to_create_inferior != NULL)
2825 return;
2826
2827 /* Do not worry about thread_stratum targets that can not
2828 create inferiors. Assume they will be pushed again if
2829 necessary, and continue to the process_stratum. */
2830 if (t->to_stratum == thread_stratum
2831 || t->to_stratum == arch_stratum)
2832 continue;
2833
2834 error (_("The \"%s\" target does not support \"run\". "
2835 "Try \"help target\" or \"continue\"."),
2836 t->to_shortname);
2837 }
2838
2839 /* This function is only called if the target is running. In that
2840 case there should have been a process_stratum target and it
2841 should either know how to create inferiors, or not... */
2842 internal_error (__FILE__, __LINE__, _("No targets found"));
2843 }
2844
2845 /* Look through the list of possible targets for a target that can
2846 execute a run or attach command without any other data. This is
2847 used to locate the default process stratum.
2848
2849 If DO_MESG is not NULL, the result is always valid (error() is
2850 called for errors); else, return NULL on error. */
2851
2852 static struct target_ops *
2853 find_default_run_target (char *do_mesg)
2854 {
2855 struct target_ops **t;
2856 struct target_ops *runable = NULL;
2857 int count;
2858
2859 count = 0;
2860
2861 for (t = target_structs; t < target_structs + target_struct_size;
2862 ++t)
2863 {
2864 if ((*t)->to_can_run && target_can_run (*t))
2865 {
2866 runable = *t;
2867 ++count;
2868 }
2869 }
2870
2871 if (count != 1)
2872 {
2873 if (do_mesg)
2874 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2875 else
2876 return NULL;
2877 }
2878
2879 return runable;
2880 }
2881
2882 void
2883 find_default_attach (struct target_ops *ops, char *args, int from_tty)
2884 {
2885 struct target_ops *t;
2886
2887 t = find_default_run_target ("attach");
2888 (t->to_attach) (t, args, from_tty);
2889 return;
2890 }
2891
2892 void
2893 find_default_create_inferior (struct target_ops *ops,
2894 char *exec_file, char *allargs, char **env,
2895 int from_tty)
2896 {
2897 struct target_ops *t;
2898
2899 t = find_default_run_target ("run");
2900 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
2901 return;
2902 }
2903
2904 static int
2905 find_default_can_async_p (struct target_ops *ignore)
2906 {
2907 struct target_ops *t;
2908
2909 /* This may be called before the target is pushed on the stack;
2910 look for the default process stratum. If there's none, gdb isn't
2911 configured with a native debugger, and target remote isn't
2912 connected yet. */
2913 t = find_default_run_target (NULL);
2914 if (t && t->to_can_async_p != delegate_can_async_p)
2915 return (t->to_can_async_p) (t);
2916 return 0;
2917 }
2918
2919 static int
2920 find_default_is_async_p (struct target_ops *ignore)
2921 {
2922 struct target_ops *t;
2923
2924 /* This may be called before the target is pushed on the stack;
2925 look for the default process stratum. If there's none, gdb isn't
2926 configured with a native debugger, and target remote isn't
2927 connected yet. */
2928 t = find_default_run_target (NULL);
2929 if (t && t->to_is_async_p != delegate_is_async_p)
2930 return (t->to_is_async_p) (t);
2931 return 0;
2932 }
2933
2934 static int
2935 find_default_supports_non_stop (struct target_ops *self)
2936 {
2937 struct target_ops *t;
2938
2939 t = find_default_run_target (NULL);
2940 if (t && t->to_supports_non_stop)
2941 return (t->to_supports_non_stop) (t);
2942 return 0;
2943 }
2944
2945 int
2946 target_supports_non_stop (void)
2947 {
2948 struct target_ops *t;
2949
2950 for (t = &current_target; t != NULL; t = t->beneath)
2951 if (t->to_supports_non_stop)
2952 return t->to_supports_non_stop (t);
2953
2954 return 0;
2955 }
2956
2957 /* Implement the "info proc" command. */
2958
2959 int
2960 target_info_proc (char *args, enum info_proc_what what)
2961 {
2962 struct target_ops *t;
2963
2964 /* If we're already connected to something that can get us OS
2965 related data, use it. Otherwise, try using the native
2966 target. */
2967 if (current_target.to_stratum >= process_stratum)
2968 t = current_target.beneath;
2969 else
2970 t = find_default_run_target (NULL);
2971
2972 for (; t != NULL; t = t->beneath)
2973 {
2974 if (t->to_info_proc != NULL)
2975 {
2976 t->to_info_proc (t, args, what);
2977
2978 if (targetdebug)
2979 fprintf_unfiltered (gdb_stdlog,
2980 "target_info_proc (\"%s\", %d)\n", args, what);
2981
2982 return 1;
2983 }
2984 }
2985
2986 return 0;
2987 }
2988
2989 static int
2990 find_default_supports_disable_randomization (struct target_ops *self)
2991 {
2992 struct target_ops *t;
2993
2994 t = find_default_run_target (NULL);
2995 if (t && t->to_supports_disable_randomization)
2996 return (t->to_supports_disable_randomization) (t);
2997 return 0;
2998 }
2999
3000 int
3001 target_supports_disable_randomization (void)
3002 {
3003 struct target_ops *t;
3004
3005 for (t = &current_target; t != NULL; t = t->beneath)
3006 if (t->to_supports_disable_randomization)
3007 return t->to_supports_disable_randomization (t);
3008
3009 return 0;
3010 }
3011
3012 char *
3013 target_get_osdata (const char *type)
3014 {
3015 struct target_ops *t;
3016
3017 /* If we're already connected to something that can get us OS
3018 related data, use it. Otherwise, try using the native
3019 target. */
3020 if (current_target.to_stratum >= process_stratum)
3021 t = current_target.beneath;
3022 else
3023 t = find_default_run_target ("get OS data");
3024
3025 if (!t)
3026 return NULL;
3027
3028 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3029 }
3030
3031 /* Determine the current address space of thread PTID. */
3032
3033 struct address_space *
3034 target_thread_address_space (ptid_t ptid)
3035 {
3036 struct address_space *aspace;
3037 struct inferior *inf;
3038 struct target_ops *t;
3039
3040 for (t = current_target.beneath; t != NULL; t = t->beneath)
3041 {
3042 if (t->to_thread_address_space != NULL)
3043 {
3044 aspace = t->to_thread_address_space (t, ptid);
3045 gdb_assert (aspace);
3046
3047 if (targetdebug)
3048 fprintf_unfiltered (gdb_stdlog,
3049 "target_thread_address_space (%s) = %d\n",
3050 target_pid_to_str (ptid),
3051 address_space_num (aspace));
3052 return aspace;
3053 }
3054 }
3055
3056 /* Fall-back to the "main" address space of the inferior. */
3057 inf = find_inferior_pid (ptid_get_pid (ptid));
3058
3059 if (inf == NULL || inf->aspace == NULL)
3060 internal_error (__FILE__, __LINE__,
3061 _("Can't determine the current "
3062 "address space of thread %s\n"),
3063 target_pid_to_str (ptid));
3064
3065 return inf->aspace;
3066 }
3067
3068
3069 /* Target file operations. */
3070
3071 static struct target_ops *
3072 default_fileio_target (void)
3073 {
3074 /* If we're already connected to something that can perform
3075 file I/O, use it. Otherwise, try using the native target. */
3076 if (current_target.to_stratum >= process_stratum)
3077 return current_target.beneath;
3078 else
3079 return find_default_run_target ("file I/O");
3080 }
3081
3082 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3083 target file descriptor, or -1 if an error occurs (and set
3084 *TARGET_ERRNO). */
3085 int
3086 target_fileio_open (const char *filename, int flags, int mode,
3087 int *target_errno)
3088 {
3089 struct target_ops *t;
3090
3091 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3092 {
3093 if (t->to_fileio_open != NULL)
3094 {
3095 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
3096
3097 if (targetdebug)
3098 fprintf_unfiltered (gdb_stdlog,
3099 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3100 filename, flags, mode,
3101 fd, fd != -1 ? 0 : *target_errno);
3102 return fd;
3103 }
3104 }
3105
3106 *target_errno = FILEIO_ENOSYS;
3107 return -1;
3108 }
3109
3110 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3111 Return the number of bytes written, or -1 if an error occurs
3112 (and set *TARGET_ERRNO). */
3113 int
3114 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3115 ULONGEST offset, int *target_errno)
3116 {
3117 struct target_ops *t;
3118
3119 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3120 {
3121 if (t->to_fileio_pwrite != NULL)
3122 {
3123 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
3124 target_errno);
3125
3126 if (targetdebug)
3127 fprintf_unfiltered (gdb_stdlog,
3128 "target_fileio_pwrite (%d,...,%d,%s) "
3129 "= %d (%d)\n",
3130 fd, len, pulongest (offset),
3131 ret, ret != -1 ? 0 : *target_errno);
3132 return ret;
3133 }
3134 }
3135
3136 *target_errno = FILEIO_ENOSYS;
3137 return -1;
3138 }
3139
3140 /* Read up to LEN bytes FD on the target into READ_BUF.
3141 Return the number of bytes read, or -1 if an error occurs
3142 (and set *TARGET_ERRNO). */
3143 int
3144 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3145 ULONGEST offset, int *target_errno)
3146 {
3147 struct target_ops *t;
3148
3149 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3150 {
3151 if (t->to_fileio_pread != NULL)
3152 {
3153 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
3154 target_errno);
3155
3156 if (targetdebug)
3157 fprintf_unfiltered (gdb_stdlog,
3158 "target_fileio_pread (%d,...,%d,%s) "
3159 "= %d (%d)\n",
3160 fd, len, pulongest (offset),
3161 ret, ret != -1 ? 0 : *target_errno);
3162 return ret;
3163 }
3164 }
3165
3166 *target_errno = FILEIO_ENOSYS;
3167 return -1;
3168 }
3169
3170 /* Close FD on the target. Return 0, or -1 if an error occurs
3171 (and set *TARGET_ERRNO). */
3172 int
3173 target_fileio_close (int fd, int *target_errno)
3174 {
3175 struct target_ops *t;
3176
3177 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3178 {
3179 if (t->to_fileio_close != NULL)
3180 {
3181 int ret = t->to_fileio_close (t, fd, target_errno);
3182
3183 if (targetdebug)
3184 fprintf_unfiltered (gdb_stdlog,
3185 "target_fileio_close (%d) = %d (%d)\n",
3186 fd, ret, ret != -1 ? 0 : *target_errno);
3187 return ret;
3188 }
3189 }
3190
3191 *target_errno = FILEIO_ENOSYS;
3192 return -1;
3193 }
3194
3195 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3196 occurs (and set *TARGET_ERRNO). */
3197 int
3198 target_fileio_unlink (const char *filename, int *target_errno)
3199 {
3200 struct target_ops *t;
3201
3202 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3203 {
3204 if (t->to_fileio_unlink != NULL)
3205 {
3206 int ret = t->to_fileio_unlink (t, filename, target_errno);
3207
3208 if (targetdebug)
3209 fprintf_unfiltered (gdb_stdlog,
3210 "target_fileio_unlink (%s) = %d (%d)\n",
3211 filename, ret, ret != -1 ? 0 : *target_errno);
3212 return ret;
3213 }
3214 }
3215
3216 *target_errno = FILEIO_ENOSYS;
3217 return -1;
3218 }
3219
3220 /* Read value of symbolic link FILENAME on the target. Return a
3221 null-terminated string allocated via xmalloc, or NULL if an error
3222 occurs (and set *TARGET_ERRNO). */
3223 char *
3224 target_fileio_readlink (const char *filename, int *target_errno)
3225 {
3226 struct target_ops *t;
3227
3228 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3229 {
3230 if (t->to_fileio_readlink != NULL)
3231 {
3232 char *ret = t->to_fileio_readlink (t, filename, target_errno);
3233
3234 if (targetdebug)
3235 fprintf_unfiltered (gdb_stdlog,
3236 "target_fileio_readlink (%s) = %s (%d)\n",
3237 filename, ret? ret : "(nil)",
3238 ret? 0 : *target_errno);
3239 return ret;
3240 }
3241 }
3242
3243 *target_errno = FILEIO_ENOSYS;
3244 return NULL;
3245 }
3246
/* Cleanup callback: close the target fileio descriptor whose address
   is OPAQUE, discarding any close error.  */
static void
target_fileio_close_cleanup (void *opaque)
{
  int ignored_errno;

  target_fileio_close (*(int *) opaque, &ignored_errno);
}
3255
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Guarantee the descriptor is closed on every return path below.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  do_cleanups (close_cleanup);
	  /* NOTE: for an empty file BUF is freed and *BUF_P is left
	     unset; callers must check for a zero return before using
	     *BUF_P.  Otherwise ownership of BUF transfers to the
	     caller via *BUF_P.  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      /* Allow the user to interrupt a long transfer.  */
      QUIT;
    }
}
3319
/* Read target file FILENAME.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.  */

LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  /* Zero padding: callers get exactly the file contents.  */
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}
3329
/* Read target file FILENAME.  The result is NUL-terminated and
   returned as a string, allocated using xmalloc.  If an error occurs
   or the transfer is unsupported, NULL is returned.  Empty objects
   are returned as allocated but empty strings.  A warning is issued
   if the result contains any embedded NUL bytes.  */

char *
target_fileio_read_stralloc (const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Request one byte of padding so the terminating NUL below always
     has room.  */
  transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  /* BUFFER was already freed by the helper in this case.  */
  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target file %s "
		   "contained unexpected null characters"),
		 filename);
	break;
      }

  return bufstr;
}
3366
3367
3368 static int
3369 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3370 CORE_ADDR addr, int len)
3371 {
3372 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3373 }
3374
3375 static int
3376 default_watchpoint_addr_within_range (struct target_ops *target,
3377 CORE_ADDR addr,
3378 CORE_ADDR start, int length)
3379 {
3380 return addr >= start && addr < start + length;
3381 }
3382
/* Default implementation of to_thread_architecture: every thread uses
   the inferior's architecture.  */
static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch ();
}
3388
/* Helper that simply returns zero.  NOTE(review): init_dummy_target
   installs this through casts to other function-pointer types; see the
   comments there.  */
static int
return_zero (void)
{
  return 0;
}
3394
/*
 * Find the next target down the stack from the specified target.
 */

struct target_ops *
find_target_beneath (struct target_ops *t)
{
  /* Each target records the one directly beneath it.  */
  return t->beneath;
}
3404
3405 /* See target.h. */
3406
3407 struct target_ops *
3408 find_target_at (enum strata stratum)
3409 {
3410 struct target_ops *t;
3411
3412 for (t = current_target.beneath; t != NULL; t = t->beneath)
3413 if (t->to_stratum == stratum)
3414 return t;
3415
3416 return NULL;
3417 }
3418
3419 \f
/* The inferior process has died.  Long live the inferior!  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Clear inferior_ptid first; the saved copy is used below.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  /* Give the UI hook a chance to react, if installed.  */
  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3454 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  /* NOTE: the result is overwritten by the next call; callers must
     copy it if they need it to persist.  */
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}
3466
/* Default implementation of to_pid_to_str: format PTID as a plain
   "process NNN" string.  */
static char *
default_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3472
/* Error-catcher for target_find_memory_regions.  */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Not reached; error () does not return.  */
  return 0;
}
3481
/* Error-catcher for target_make_corefile_notes.  */
static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Not reached; error () does not return.  */
  return NULL;
}
3490
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_stratum = dummy_stratum;
  /* NOTE(review): return_zero is declared with a different signature
     and installed via casts; calling through a mismatched
     function-pointer type is formally undefined behavior in C, though
     it works on the ABIs GDB targets.  Consider per-signature stubs.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  /* Fill every remaining slot with an auto-generated default.  */
  install_dummy_methods (&dummy_target);
}
3515 \f
/* Debug-logging wrapper around the saved target's to_open.  */
static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}
3523
/* Close TARG.  The target must already have been unpushed.  Prefers
   the extended to_xclose method (which also owns TARG's storage) over
   plain to_close.  */
void
target_close (struct target_ops *targ)
{
  gdb_assert (!target_is_pushed (targ));

  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3537
/* Attach to a process as described by ARGS, delegating to the current
   target vector.  */
void
target_attach (char *args, int from_tty)
{
  current_target.to_attach (&current_target, args, from_tty);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
			args, from_tty);
}
3546
/* Return nonzero if the thread identified by PTID is still alive,
   as reported by the current target.  */
int
target_thread_alive (ptid_t ptid)
{
  int retval;

  retval = current_target.to_thread_alive (&current_target, ptid);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
			ptid_get_pid (ptid), retval);

  return retval;
}
3559
/* Ask the current target to discover any threads not yet in GDB's
   thread list.  */
void
target_find_new_threads (void)
{
  current_target.to_find_new_threads (&current_target);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
}
3567
/* Stop the thread(s) identified by PTID, honoring the global
   "may-stop" setting.  */
void
target_stop (ptid_t ptid)
{
  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  (*current_target.to_stop) (&current_target, ptid);
}
3579
/* Debug-logging wrapper around the saved target's to_post_attach.  */
static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3587
3588 /* Concatenate ELEM to LIST, a comma separate list, and return the
3589 result. The LIST incoming argument is released. */
3590
3591 static char *
3592 str_comma_list_concat_elem (char *list, const char *elem)
3593 {
3594 if (list == NULL)
3595 return xstrdup (elem);
3596 else
3597 return reconcat (list, list, ", ", elem, (char *) NULL);
3598 }
3599
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  /* Nothing to do when the flag is absent.  */
  if ((*target_options & opt) == 0)
    return ret;

  *target_options &= ~opt;
  return str_comma_list_concat_elem (ret, opt_str);
}
3617
3618 char *
3619 target_options_to_string (int target_options)
3620 {
3621 char *ret = NULL;
3622
3623 #define DO_TARG_OPTION(OPT) \
3624 ret = do_option (&target_options, ret, OPT, #OPT)
3625
3626 DO_TARG_OPTION (TARGET_WNOHANG);
3627
3628 if (target_options != 0)
3629 ret = str_comma_list_concat_elem (ret, "unknown???");
3630
3631 if (ret == NULL)
3632 ret = xstrdup ("");
3633 return ret;
3634 }
3635
/* Log one register access to gdb_stdlog: FUNC, the register's name
   (or number), its raw bytes, and - when it fits in a LONGEST - its
   value in hex and decimal.  */
static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the symbolic register name when one exists.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      /* Raw byte dump, in target memory order.  */
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* Also print the value numerically when it fits.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3672
/* Fetch register REGNO (or all registers if REGNO is -1; see
   target.h) from the target into REGCACHE.  */
void
target_fetch_registers (struct regcache *regcache, int regno)
{
  current_target.to_fetch_registers (&current_target, regcache, regno);
  if (targetdebug)
    debug_print_register ("target_fetch_registers", regcache, regno);
}
3680
3681 void
3682 target_store_registers (struct regcache *regcache, int regno)
3683 {
3684 struct target_ops *t;
3685
3686 if (!may_write_registers)
3687 error (_("Writing to registers is not allowed (regno %d)"), regno);
3688
3689 current_target.to_store_registers (&current_target, regcache, regno);
3690 if (targetdebug)
3691 {
3692 debug_print_register ("target_store_registers", regcache, regno);
3693 }
3694 }
3695
/* Return the core the thread PTID was last seen on, as reported by
   the current target.  */
int
target_core_of_thread (ptid_t ptid)
{
  int retval = current_target.to_core_of_thread (&current_target, ptid);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog,
			"target_core_of_thread (%d) = %d\n",
			ptid_get_pid (ptid), retval);
  return retval;
}
3707
/* Compare SIZE bytes at MEMADDR in target memory against DATA, via
   the current target's to_verify_memory method.  */
int
target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  int retval = current_target.to_verify_memory (&current_target,
						data, memaddr, size);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog,
			"target_verify_memory (%s, %s) = %d\n",
			paddress (target_gdbarch (), memaddr),
			pulongest (size),
			retval);
  return retval;
}
3722
/* The documentation for this function is in its prototype declaration in
   target.h.  */

int
target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  int ret;

  /* Delegate to the current target vector.  */
  ret = current_target.to_insert_mask_watchpoint (&current_target,
						  addr, mask, rw);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "\
target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
			core_addr_to_string (addr),
			core_addr_to_string (mask), rw, ret);

  return ret;
}
3742
/* The documentation for this function is in its prototype declaration in
   target.h.  */

int
target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  int ret;

  /* Delegate to the current target vector.  */
  ret = current_target.to_remove_mask_watchpoint (&current_target,
						  addr, mask, rw);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "\
target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
			core_addr_to_string (addr),
			core_addr_to_string (mask), rw, ret);

  return ret;
}
3762
/* The documentation for this function is in its prototype declaration
   in target.h.  */

int
target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
{
  return current_target.to_masked_watch_num_registers (&current_target,
						       addr, mask);
}
3772
/* The documentation for this function is in its prototype declaration
   in target.h.  */

int
target_ranged_break_num_registers (void)
{
  return current_target.to_ranged_break_num_registers (&current_target);
}
3781
/* See target.h.  */

struct btrace_target_info *
target_enable_btrace (ptid_t ptid)
{
  /* Delegate branch-trace enabling to the current target vector.  */
  return current_target.to_enable_btrace (&current_target, ptid);
}
3789
/* See target.h.  */

void
target_disable_btrace (struct btrace_target_info *btinfo)
{
  /* Delegate branch-trace disabling to the current target vector.  */
  current_target.to_disable_btrace (&current_target, btinfo);
}
3797
/* See target.h.  */

void
target_teardown_btrace (struct btrace_target_info *btinfo)
{
  /* Delegate branch-trace teardown to the current target vector.  */
  current_target.to_teardown_btrace (&current_target, btinfo);
}
3805
/* See target.h.  */

enum btrace_error
target_read_btrace (VEC (btrace_block_s) **btrace,
		    struct btrace_target_info *btinfo,
		    enum btrace_read_type type)
{
  /* Delegate branch-trace reading to the current target vector.  */
  return current_target.to_read_btrace (&current_target, btrace, btinfo, type);
}
3815
/* See target.h.  */

void
target_stop_recording (void)
{
  /* Delegate to the current target vector.  */
  current_target.to_stop_recording (&current_target);
}
3823
3824 /* See target.h. */
3825
3826 void
3827 target_info_record (void)
3828 {
3829 struct target_ops *t;
3830
3831 for (t = current_target.beneath; t != NULL; t = t->beneath)
3832 if (t->to_info_record != NULL)
3833 {
3834 t->to_info_record (t);
3835 return;
3836 }
3837
3838 tcomplain ();
3839 }
3840
/* See target.h.  */

void
target_save_record (const char *filename)
{
  /* Delegate to the current target vector.  */
  current_target.to_save_record (&current_target, filename);
}
3848
3849 /* See target.h. */
3850
3851 int
3852 target_supports_delete_record (void)
3853 {
3854 struct target_ops *t;
3855
3856 for (t = current_target.beneath; t != NULL; t = t->beneath)
3857 if (t->to_delete_record != NULL)
3858 return 1;
3859
3860 return 0;
3861 }
3862
/* See target.h.  */

void
target_delete_record (void)
{
  /* Delegate to the current target vector.  */
  current_target.to_delete_record (&current_target);
}
3870
/* See target.h.  */

int
target_record_is_replaying (void)
{
  /* Delegate to the current target vector.  */
  return current_target.to_record_is_replaying (&current_target);
}
3878
/* See target.h.  */

void
target_goto_record_begin (void)
{
  /* Delegate to the current target vector.  */
  current_target.to_goto_record_begin (&current_target);
}
3886
/* See target.h.  */

void
target_goto_record_end (void)
{
  /* Delegate to the current target vector.  */
  current_target.to_goto_record_end (&current_target);
}
3894
/* See target.h.  */

void
target_goto_record (ULONGEST insn)
{
  /* Delegate to the current target vector.  */
  current_target.to_goto_record (&current_target, insn);
}
3902
/* See target.h.  */

void
target_insn_history (int size, int flags)
{
  /* Delegate to the current target vector.  */
  current_target.to_insn_history (&current_target, size, flags);
}
3910
/* See target.h.  */

void
target_insn_history_from (ULONGEST from, int size, int flags)
{
  /* Delegate to the current target vector.  */
  current_target.to_insn_history_from (&current_target, from, size, flags);
}
3918
/* See target.h.  */

void
target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  /* Delegate to the current target vector.  */
  current_target.to_insn_history_range (&current_target, begin, end, flags);
}
3926
/* See target.h.  */

void
target_call_history (int size, int flags)
{
  /* Delegate to the current target vector.  */
  current_target.to_call_history (&current_target, size, flags);
}
3934
/* See target.h.  */

void
target_call_history_from (ULONGEST begin, int size, int flags)
{
  /* Delegate to the current target vector.  */
  current_target.to_call_history_from (&current_target, begin, size, flags);
}
3942
/* See target.h.  */

void
target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  /* Delegate to the current target vector.  */
  current_target.to_call_history_range (&current_target, begin, end, flags);
}
3950
/* Debug-logging wrapper around the saved target's to_prepare_to_store.  */
static void
debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
{
  debug_target.to_prepare_to_store (&debug_target, regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
3958
3959 /* See target.h. */
3960
3961 const struct frame_unwind *
3962 target_get_unwinder (void)
3963 {
3964 struct target_ops *t;
3965
3966 for (t = current_target.beneath; t != NULL; t = t->beneath)
3967 if (t->to_get_unwinder != NULL)
3968 return t->to_get_unwinder;
3969
3970 return NULL;
3971 }
3972
3973 /* See target.h. */
3974
3975 const struct frame_unwind *
3976 target_get_tailcall_unwinder (void)
3977 {
3978 struct target_ops *t;
3979
3980 for (t = current_target.beneath; t != NULL; t = t->beneath)
3981 if (t->to_get_tailcall_unwinder != NULL)
3982 return t->to_get_tailcall_unwinder;
3983
3984 return NULL;
3985 }
3986
3987 /* See target.h. */
3988
3989 CORE_ADDR
3990 forward_target_decr_pc_after_break (struct target_ops *ops,
3991 struct gdbarch *gdbarch)
3992 {
3993 for (; ops != NULL; ops = ops->beneath)
3994 if (ops->to_decr_pc_after_break != NULL)
3995 return ops->to_decr_pc_after_break (ops, gdbarch);
3996
3997 return gdbarch_decr_pc_after_break (gdbarch);
3998 }
3999
/* See target.h.  */

CORE_ADDR
target_decr_pc_after_break (struct gdbarch *gdbarch)
{
  /* Start the search below the current (debug) target.  */
  return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
}
4007
/* Debug-logging wrapper around the saved target's
   deprecated_xfer_memory.  Logs the request and, on success, a hex
   dump of the transferred bytes (abbreviated unless targetdebug >= 2).  */
static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the dump at 16-byte-aligned buffer addresses.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* Abbreviate after the first line unless verbose.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4048
/* Debug-logging wrapper around the saved target's to_files_info.  */
static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
4056
/* Debug-logging wrapper around the saved target's to_insert_breakpoint.  */
static int
debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4071
/* Debug-logging wrapper around the saved target's to_remove_breakpoint.  */
static int
debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4086
/* Debug-logging wrapper around the saved target's
   to_can_use_hw_breakpoint.  */
static int
debug_to_can_use_hw_breakpoint (struct target_ops *self,
				int type, int cnt, int from_tty)
{
  int retval;

  retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
						  type, cnt, from_tty);

  fprintf_unfiltered (gdb_stdlog,
		      "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
		      (unsigned long) type,
		      (unsigned long) cnt,
		      (unsigned long) from_tty,
		      (unsigned long) retval);
  return retval;
}
4104
4105 static int
4106 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4107 CORE_ADDR addr, int len)
4108 {
4109 CORE_ADDR retval;
4110
4111 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4112 addr, len);
4113
4114 fprintf_unfiltered (gdb_stdlog,
4115 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4116 core_addr_to_string (addr), (unsigned long) len,
4117 core_addr_to_string (retval));
4118 return retval;
4119 }
4120
/* Debug-logging wrapper around the saved target's
   to_can_accel_watchpoint_condition.  */
static int
debug_to_can_accel_watchpoint_condition (struct target_ops *self,
					 CORE_ADDR addr, int len, int rw,
					 struct expression *cond)
{
  int retval;

  retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
							   addr, len,
							   rw, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_can_accel_watchpoint_condition "
		      "(%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, rw,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4139
/* Debug-logging wrapper around the saved target's
   to_stopped_by_watchpoint.  */
static int
debug_to_stopped_by_watchpoint (struct target_ops *ops)
{
  int retval;

  retval = debug_target.to_stopped_by_watchpoint (&debug_target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_by_watchpoint () = %ld\n",
		      (unsigned long) retval);
  return retval;
}
4152
/* Debug-logging wrapper around the saved target's
   to_stopped_data_address.  NOTE(review): *ADDR is printed even when
   the method returns 0, in which case it may be uninitialized - the
   log value is only meaningful on success.  */
static int
debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
{
  int retval;

  retval = debug_target.to_stopped_data_address (target, addr);

  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_data_address ([%s]) = %ld\n",
		      core_addr_to_string (*addr),
		      (unsigned long)retval);
  return retval;
}
4166
4167 static int
4168 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4169 CORE_ADDR addr,
4170 CORE_ADDR start, int length)
4171 {
4172 int retval;
4173
4174 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4175 start, length);
4176
4177 fprintf_filtered (gdb_stdlog,
4178 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4179 core_addr_to_string (addr), core_addr_to_string (start),
4180 length, retval);
4181 return retval;
4182 }
4183
/* Debug-logging wrapper around the saved target's
   to_insert_hw_breakpoint.  */
static int
debug_to_insert_hw_breakpoint (struct target_ops *self,
			       struct gdbarch *gdbarch,
			       struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_hw_breakpoint (&debug_target,
						 gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4200
/* Debug-logging wrapper around the saved target's
   to_remove_hw_breakpoint.  */
static int
debug_to_remove_hw_breakpoint (struct target_ops *self,
			       struct gdbarch *gdbarch,
			       struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_hw_breakpoint (&debug_target,
						 gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4217
/* Debug-logging wrapper around the saved target's to_insert_watchpoint.  */
static int
debug_to_insert_watchpoint (struct target_ops *self,
			    CORE_ADDR addr, int len, int type,
			    struct expression *cond)
{
  int retval;

  retval = debug_target.to_insert_watchpoint (&debug_target,
					      addr, len, type, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, type,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4234
/* Debug-logging wrapper around the saved target's to_remove_watchpoint.  */
static int
debug_to_remove_watchpoint (struct target_ops *self,
			    CORE_ADDR addr, int len, int type,
			    struct expression *cond)
{
  int retval;

  retval = debug_target.to_remove_watchpoint (&debug_target,
					      addr, len, type, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, type,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4251
/* Debug-logging wrapper around the saved target's to_terminal_init.  */
static void
debug_to_terminal_init (struct target_ops *self)
{
  debug_target.to_terminal_init (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
4259
/* Debug-logging wrapper around the saved target's to_terminal_inferior.  */
static void
debug_to_terminal_inferior (struct target_ops *self)
{
  debug_target.to_terminal_inferior (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
4267
/* Debug-logging wrapper around the saved target's
   to_terminal_ours_for_output.  */
static void
debug_to_terminal_ours_for_output (struct target_ops *self)
{
  debug_target.to_terminal_ours_for_output (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}
4275
/* Debug-logging wrapper around the saved target's to_terminal_ours.  */
static void
debug_to_terminal_ours (struct target_ops *self)
{
  debug_target.to_terminal_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}
4283
/* Debug-logging wrapper around the saved target's
   to_terminal_save_ours.  */
static void
debug_to_terminal_save_ours (struct target_ops *self)
{
  debug_target.to_terminal_save_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}
4291
/* Debug-logging wrapper around the saved target's to_terminal_info.  */
static void
debug_to_terminal_info (struct target_ops *self,
			const char *arg, int from_tty)
{
  debug_target.to_terminal_info (&debug_target, arg, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
		      from_tty);
}
4301
/* Debug-logging wrapper around the saved target's to_load.  */
static void
debug_to_load (struct target_ops *self, char *args, int from_tty)
{
  debug_target.to_load (&debug_target, args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
4309
/* Debug-logging wrapper around the saved target's
   to_post_startup_inferior.  */
static void
debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_post_startup_inferior (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      ptid_get_pid (ptid));
}
4318
/* Debug-logging wrapper around the saved target's
   to_insert_fork_catchpoint.  */
static int
debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4331
/* Debug-logging wrapper around the saved target's
   to_remove_fork_catchpoint.  */
static int
debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4344
/* Debug-logging wrapper around the saved target's
   to_insert_vfork_catchpoint.  */
static int
debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4357
/* Debug-logging wrapper around the saved target's
   to_remove_vfork_catchpoint.  */
static int
debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4370
/* Debug-logging wrapper around the saved target's
   to_insert_exec_catchpoint.  */
static int
debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4383
/* Debug-logging wrapper around the saved target's
   to_remove_exec_catchpoint.  */
static int
debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4396
4397 static int
4398 debug_to_has_exited (struct target_ops *self,
4399 int pid, int wait_status, int *exit_status)
4400 {
4401 int has_exited;
4402
4403 has_exited = debug_target.to_has_exited (&debug_target,
4404 pid, wait_status, exit_status);
4405
4406 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4407 pid, wait_status, *exit_status, has_exited);
4408
4409 return has_exited;
4410 }
4411
4412 static int
4413 debug_to_can_run (struct target_ops *self)
4414 {
4415 int retval;
4416
4417 retval = debug_target.to_can_run (&debug_target);
4418
4419 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4420
4421 return retval;
4422 }
4423
4424 static struct gdbarch *
4425 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4426 {
4427 struct gdbarch *retval;
4428
4429 retval = debug_target.to_thread_architecture (ops, ptid);
4430
4431 fprintf_unfiltered (gdb_stdlog,
4432 "target_thread_architecture (%s) = %s [%s]\n",
4433 target_pid_to_str (ptid),
4434 host_address_to_string (retval),
4435 gdbarch_bfd_arch_info (retval)->printable_name);
4436 return retval;
4437 }
4438
4439 static void
4440 debug_to_stop (struct target_ops *self, ptid_t ptid)
4441 {
4442 debug_target.to_stop (&debug_target, ptid);
4443
4444 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4445 target_pid_to_str (ptid));
4446 }
4447
4448 static void
4449 debug_to_rcmd (struct target_ops *self, char *command,
4450 struct ui_file *outbuf)
4451 {
4452 debug_target.to_rcmd (&debug_target, command, outbuf);
4453 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4454 }
4455
4456 static char *
4457 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4458 {
4459 char *exec_file;
4460
4461 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4462
4463 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4464 pid, exec_file);
4465
4466 return exec_file;
4467 }
4468
/* Install the debug_to_* logging wrappers into CURRENT_TARGET.  A copy
   of the pre-debug vector is saved in DEBUG_TARGET first; each wrapper
   delegates to that copy and echoes the call to gdb_stdlog.  Only the
   methods listed below are wrapped; the rest keep their copied
   implementations.  */

static void
setup_target_debug (void)
{
  /* Snapshot the real vector so the wrappers have something to call.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4516 \f
4517
/* Help text shared by the "info target" and "info files" commands
   (both are registered with this description in initialize_targets).  */
static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4522
/* Default implementation of the to_rcmd target method: the target has
   no remote monitor interface, so any "monitor" command is an error.  */

static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
4528
4529 static void
4530 do_monitor_command (char *cmd,
4531 int from_tty)
4532 {
4533 target_rcmd (cmd, gdb_stdtarg);
4534 }
4535
4536 /* Print the name of each layers of our target stack. */
4537
4538 static void
4539 maintenance_print_target_stack (char *cmd, int from_tty)
4540 {
4541 struct target_ops *t;
4542
4543 printf_filtered (_("The current target stack is:\n"));
4544
4545 for (t = target_stack; t != NULL; t = t->beneath)
4546 {
4547 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4548 }
4549 }
4550
/* Controls if async mode is permitted.  This is the effective value
   consulted by the rest of GDB.  */
int target_async_permitted = 0;

/* The "set target-async" command writes to this staging variable; it is
   copied into target_async_permitted by set_target_async_command only
   when no inferior is executing.  */
static int target_async_permitted_1 = 0;
4557
4558 static void
4559 set_target_async_command (char *args, int from_tty,
4560 struct cmd_list_element *c)
4561 {
4562 if (have_live_inferiors ())
4563 {
4564 target_async_permitted_1 = target_async_permitted;
4565 error (_("Cannot change this setting while the inferior is running."));
4566 }
4567
4568 target_async_permitted = target_async_permitted_1;
4569 }
4570
/* "show target-async" hook: report the current setting VALUE.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"),
		    value);
}
4580
/* Temporary copies of permission settings.  The "set may-*" commands
   write to these staging variables; set_target_permissions (and, for
   memory, set_write_memory_permission) copies them into the effective
   flags.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4589
/* Make the user-set (staging) values match the real values again, so
   that "show may-*" reports the true state after a rejected change.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
4602
/* The one function handles (most of) the permission flags in the same
   way.  Shared "set may-*" hook: commits the staged values into the
   effective flags, refusing if the inferior is running.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Roll the staged values back before erroring so "show" output
	 still matches reality.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.
     may_write_memory is intentionally absent: it is committed by
     set_write_memory_permission so it can change independently of
     observer mode.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
4624
/* Set memory write permission independently of observer mode.  Unlike
   set_target_permissions, this hook does not refuse while the inferior
   is running.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
4635
4636
/* Module initialization: push the dummy target as the base of the
   target stack and register all target-related commands and
   set/show variables.  */

void
initialize_targets (void)
{
  /* The dummy target sits at the bottom of the stack so there is
     always a target to delegate to.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are aliases of each other.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  /* The staging variable is registered; the hooks commit/roll back.  */
  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* Permission flags: each registers its staging variable with
     set_target_permissions as the commit hook, except may-write-memory
     which uses its own hook (independent of observer mode).  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}