/* (gitweb page-navigation residue removed; file contents begin below.)  */
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (struct target_ops *, const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
59
60 static void tcomplain (void) ATTRIBUTE_NORETURN;
61
62 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
63
64 static int return_zero (void);
65
66 static int return_minus_one (void);
67
68 static void *return_null (void);
69
70 void target_ignore (void);
71
72 static void target_command (char *, int);
73
74 static struct target_ops *find_default_run_target (char *);
75
76 static target_xfer_partial_ftype default_xfer_partial;
77
78 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
79 ptid_t ptid);
80
81 static int dummy_find_memory_regions (struct target_ops *self,
82 find_memory_region_ftype ignore1,
83 void *ignore2);
84
85 static int find_default_can_async_p (struct target_ops *ignore);
86
87 static int find_default_is_async_p (struct target_ops *ignore);
88
89 #include "target-delegates.c"
90
91 static void init_dummy_target (void);
92
93 static struct target_ops debug_target;
94
95 static void debug_to_open (char *, int);
96
97 static void debug_to_prepare_to_store (struct target_ops *self,
98 struct regcache *);
99
100 static void debug_to_files_info (struct target_ops *);
101
102 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
103 struct bp_target_info *);
104
105 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
106 struct bp_target_info *);
107
108 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
109 int, int, int);
110
111 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
112 struct gdbarch *,
113 struct bp_target_info *);
114
115 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
116 struct gdbarch *,
117 struct bp_target_info *);
118
119 static int debug_to_insert_watchpoint (struct target_ops *self,
120 CORE_ADDR, int, int,
121 struct expression *);
122
123 static int debug_to_remove_watchpoint (struct target_ops *self,
124 CORE_ADDR, int, int,
125 struct expression *);
126
127 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
128
129 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
130 CORE_ADDR, CORE_ADDR, int);
131
132 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
133 CORE_ADDR, int);
134
135 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
136 CORE_ADDR, int, int,
137 struct expression *);
138
139 static void debug_to_terminal_init (struct target_ops *self);
140
141 static void debug_to_terminal_inferior (struct target_ops *self);
142
143 static void debug_to_terminal_ours_for_output (struct target_ops *self);
144
145 static void debug_to_terminal_save_ours (struct target_ops *self);
146
147 static void debug_to_terminal_ours (struct target_ops *self);
148
149 static void debug_to_load (struct target_ops *self, char *, int);
150
151 static int debug_to_can_run (struct target_ops *self);
152
153 static void debug_to_stop (struct target_ops *self, ptid_t);
154
155 /* Pointer to array of target architecture structures; the size of the
156 array; the current index into the array; the allocated size of the
157 array. */
158 struct target_ops **target_structs;
159 unsigned target_struct_size;
160 unsigned target_struct_allocsize;
161 #define DEFAULT_ALLOCSIZE 10
162
163 /* The initial current target, so that there is always a semi-valid
164 current target. */
165
166 static struct target_ops dummy_target;
167
168 /* Top of target stack. */
169
170 static struct target_ops *target_stack;
171
172 /* The target structure we are currently using to talk to a process
173 or file or whatever "inferior" we have. */
174
175 struct target_ops current_target;
176
177 /* Command list for target. */
178
179 static struct cmd_list_element *targetlist = NULL;
180
181 /* Nonzero if we should trust readonly sections from the
182 executable when reading memory. */
183
184 static int trust_readonly = 0;
185
186 /* Nonzero if we should show true memory content including
187 memory breakpoint inserted by gdb. */
188
189 static int show_memory_breakpoints = 0;
190
191 /* These globals control whether GDB attempts to perform these
192 operations; they are useful for targets that need to prevent
193 inadvertant disruption, such as in non-stop mode. */
194
195 int may_write_registers = 1;
196
197 int may_write_memory = 1;
198
199 int may_insert_breakpoints = 1;
200
201 int may_insert_tracepoints = 1;
202
203 int may_insert_fast_tracepoints = 1;
204
205 int may_stop = 1;
206
207 /* Non-zero if we want to see trace of target level stuff. */
208
209 static unsigned int targetdebug = 0;
/* Callback used to display the current value of the targetdebug
   setting (presumably registered for "show debug target" — confirm at
   the command-registration site).  */
static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
216
217 static void setup_target_debug (void);
218
219 /* The user just typed 'target' without the name of a target. */
220
static void
target_command (char *arg, int from_tty)
{
  /* ARG and FROM_TTY are deliberately unused: this only reminds the
     user that "target" needs a target name argument.  */
  fputs_filtered ("Argument required (target name).  Try `help target'\n",
		  gdb_stdout);
}
227
228 /* Default target_has_* methods for process_stratum targets. */
229
230 int
231 default_child_has_all_memory (struct target_ops *ops)
232 {
233 /* If no inferior selected, then we can't read memory here. */
234 if (ptid_equal (inferior_ptid, null_ptid))
235 return 0;
236
237 return 1;
238 }
239
240 int
241 default_child_has_memory (struct target_ops *ops)
242 {
243 /* If no inferior selected, then we can't read memory here. */
244 if (ptid_equal (inferior_ptid, null_ptid))
245 return 0;
246
247 return 1;
248 }
249
250 int
251 default_child_has_stack (struct target_ops *ops)
252 {
253 /* If no inferior selected, there's no stack. */
254 if (ptid_equal (inferior_ptid, null_ptid))
255 return 0;
256
257 return 1;
258 }
259
260 int
261 default_child_has_registers (struct target_ops *ops)
262 {
263 /* Can't read registers from no inferior. */
264 if (ptid_equal (inferior_ptid, null_ptid))
265 return 0;
266
267 return 1;
268 }
269
270 int
271 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
272 {
273 /* If there's no thread selected, then we can't make it run through
274 hoops. */
275 if (ptid_equal (the_ptid, null_ptid))
276 return 0;
277
278 return 1;
279 }
280
281
282 int
283 target_has_all_memory_1 (void)
284 {
285 struct target_ops *t;
286
287 for (t = current_target.beneath; t != NULL; t = t->beneath)
288 if (t->to_has_all_memory (t))
289 return 1;
290
291 return 0;
292 }
293
294 int
295 target_has_memory_1 (void)
296 {
297 struct target_ops *t;
298
299 for (t = current_target.beneath; t != NULL; t = t->beneath)
300 if (t->to_has_memory (t))
301 return 1;
302
303 return 0;
304 }
305
306 int
307 target_has_stack_1 (void)
308 {
309 struct target_ops *t;
310
311 for (t = current_target.beneath; t != NULL; t = t->beneath)
312 if (t->to_has_stack (t))
313 return 1;
314
315 return 0;
316 }
317
318 int
319 target_has_registers_1 (void)
320 {
321 struct target_ops *t;
322
323 for (t = current_target.beneath; t != NULL; t = t->beneath)
324 if (t->to_has_registers (t))
325 return 1;
326
327 return 0;
328 }
329
330 int
331 target_has_execution_1 (ptid_t the_ptid)
332 {
333 struct target_ops *t;
334
335 for (t = current_target.beneath; t != NULL; t = t->beneath)
336 if (t->to_has_execution (t, the_ptid))
337 return 1;
338
339 return 0;
340 }
341
/* Like target_has_execution_1, but for the currently selected
   inferior (inferior_ptid).  */
int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
347
348 /* Complete initialization of T. This ensures that various fields in
349 T are set, if needed by the target implementation. */
350
void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* The to_has_* predicates default to "no".  The casts are safe
     because return_zero never looks at its arguments.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Fill remaining unset methods with the delegators generated in
     target-delegates.c (included above).  */
  install_delegators (t);
}
375
376 /* Add possible target architecture T to the list and add a new
377 command 'target T->to_shortname'. Set COMPLETER as the command's
378 completer if not NULL. */
379
void
add_target_with_completer (struct target_ops *t,
			   completer_ftype *completer)
{
  struct cmd_list_element *c;

  /* Make sure T has all mandatory methods before it becomes
     reachable.  */
  complete_target_initialization (t);

  /* Lazily allocate, then geometrically grow, the global array of
     known targets.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* The "target" prefix command itself is created the first time any
     target is registered.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol.  For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
	       &targetlist);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
416
417 /* Add a possible target architecture to the list. */
418
/* Convenience wrapper: register target T with no command
   completer.  */
void
add_target (struct target_ops *t)
{
  add_target_with_completer (t, NULL);
}
424
425 /* See target.h. */
426
void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  alt = xstrprintf ("target %s", t->to_shortname);
  /* NOTE(review): ALT is heap-allocated and never freed here —
     presumably deprecate_cmd retains the pointer for the command's
     lifetime; confirm in cli-decode.c.  */
  deprecate_cmd (c, alt);
}
439
440 /* Stub functions */
441
/* Do-nothing stub, installed as the default for target methods whose
   default behavior is to silently ignore the request.  */
void
target_ignore (void)
{
}
446
447 void
448 target_kill (void)
449 {
450 struct target_ops *t;
451
452 for (t = current_target.beneath; t != NULL; t = t->beneath)
453 if (t->to_kill != NULL)
454 {
455 if (targetdebug)
456 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
457
458 t->to_kill (t);
459 return;
460 }
461
462 noprocess ();
463 }
464
void
target_load (char *arg, int from_tty)
{
  /* Loading replaces target memory contents, so drop anything the
     dcache has.  */
  target_dcache_invalidate ();
  (*current_target.to_load) (&current_target, arg, from_tty);
}
471
472 void
473 target_create_inferior (char *exec_file, char *args,
474 char **env, int from_tty)
475 {
476 struct target_ops *t;
477
478 for (t = current_target.beneath; t != NULL; t = t->beneath)
479 {
480 if (t->to_create_inferior != NULL)
481 {
482 t->to_create_inferior (t, exec_file, args, env, from_tty);
483 if (targetdebug)
484 fprintf_unfiltered (gdb_stdlog,
485 "target_create_inferior (%s, %s, xxx, %d)\n",
486 exec_file, args, from_tty);
487 return;
488 }
489 }
490
491 internal_error (__FILE__, __LINE__,
492 _("could not find a target to create inferior"));
493 }
494
/* Give the inferior ownership of the terminal, unless this is a
   background resume that should leave GDB in control.  */
void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet.  However, if sync_execution
     is not set, we know it will become async prior to resume.  */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */
  (*current_target.to_terminal_inferior) (&current_target);
}
509
/* Memory-transfer stub for targets with no accessible memory: every
   request fails with EIO and transfers nothing.  Used as the default
   deprecated_xfer_memory method.  */
static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
517
/* Report that the current target does not support the attempted
   operation.  Declared ATTRIBUTE_NORETURN above; error () throws.  */
static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
524
/* Report that the attempted operation requires a live process.
   error () throws, so this does not return normally.  */
void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
530
/* Default to_terminal_info method: there is nothing saved to
   report.  ARGS and FROM_TTY are ignored.  */
static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
536
537 /* A default implementation for the to_get_ada_task_ptid target method.
538
539 This function builds the PTID by using both LWP and TID as part of
540 the PTID lwp and tid elements. The pid used is the pid of the
541 inferior_ptid. */
542
static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  /* Reuse the pid of the current inferior; only LWP and TID vary.  */
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
548
549 static enum exec_direction_kind
550 default_execution_direction (struct target_ops *self)
551 {
552 if (!target_can_execute_reverse)
553 return EXEC_FORWARD;
554 else if (!target_can_async_p ())
555 return EXEC_FORWARD;
556 else
557 gdb_assert_not_reached ("\
558 to_execution_direction must be implemented for reverse async");
559 }
560
561 /* Go through the target stack from top to bottom, copying over zero
562 entries in current_target, then filling in still empty entries. In
563 effect, we are doing class inheritance through the pushed target
564 vectors.
565
566 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
567 is currently implemented, is that it discards any knowledge of
568 which target an inherited method originally belonged to.
569 Consequently, new new target methods should instead explicitly and
570 locally search the target stack for the target that can handle the
571 request. */
572
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  These fill in methods before the INHERIT
     pass below, which only copies fields that are still NULL.  */
  install_delegators (&current_target);

  /* Copy FIELD from TARGET into current_target, but only if
     current_target does not already have it set (first-on-the-stack
     wins).  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      /* Do not inherit to_post_attach.  */
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      /* Do not inherit to_prepare_to_store.  */
      INHERIT (deprecated_xfer_memory, t);
      /* Do not inherit to_files_info.  */
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      /* Do not inherit to_can_use_hw_breakpoint.  */
      /* Do not inherit to_insert_hw_breakpoint.  */
      /* Do not inherit to_remove_hw_breakpoint.  */
      /* Do not inherit to_ranged_break_num_registers.  */
      /* Do not inherit to_insert_watchpoint.  */
      /* Do not inherit to_remove_watchpoint.  */
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      /* Do not inherit to_watchpoint_addr_within_range.  */
      /* Do not inherit to_region_ok_for_hw_watchpoint.  */
      /* Do not inherit to_can_accel_watchpoint_condition.  */
      /* Do not inherit to_masked_watch_num_registers.  */
      /* Do not inherit to_terminal_init.  */
      /* Do not inherit to_terminal_inferior.  */
      /* Do not inherit to_terminal_ours_for_output.  */
      /* Do not inherit to_terminal_ours.  */
      /* Do not inherit to_terminal_save_ours.  */
      /* Do not inherit to_terminal_info.  */
      /* Do not inherit to_kill.  */
      /* Do not inherit to_load.  */
      /* Do no inherit to_create_inferior.  */
      /* Do not inherit to_post_startup_inferior.  */
      /* Do not inherit to_insert_fork_catchpoint.  */
      /* Do not inherit to_remove_fork_catchpoint.  */
      /* Do not inherit to_insert_vfork_catchpoint.  */
      /* Do not inherit to_remove_vfork_catchpoint.  */
      /* Do not inherit to_follow_fork.  */
      /* Do not inherit to_insert_exec_catchpoint.  */
      /* Do not inherit to_remove_exec_catchpoint.  */
      /* Do not inherit to_set_syscall_catchpoint.  */
      /* Do not inherit to_has_exited.  */
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      /* Do not inherit to_extra_thread_info.  */
      /* Do not inherit to_thread_name.  */
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      /* Do not inherit to_rcmd.  */
      /* Do not inherit to_pid_to_exec_file.  */
      /* Do not inherit to_log_command.  */
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      /* Do not inherit to_find_memory_regions.  */
      INHERIT (to_make_corefile_notes, t);
      INHERIT (to_get_bookmark, t);
      INHERIT (to_goto_bookmark, t);
      /* Do not inherit to_get_thread_local_address.  */
      INHERIT (to_can_execute_reverse, t);
      INHERIT (to_execution_direction, t);
      INHERIT (to_thread_architecture, t);
      /* Do not inherit to_read_description.  */
      INHERIT (to_get_ada_task_ptid, t);
      /* Do not inherit to_search_memory.  */
      INHERIT (to_supports_multi_process, t);
      INHERIT (to_supports_enable_disable_tracepoint, t);
      INHERIT (to_supports_string_tracing, t);
      INHERIT (to_trace_init, t);
      INHERIT (to_download_tracepoint, t);
      INHERIT (to_can_download_tracepoint, t);
      INHERIT (to_download_trace_state_variable, t);
      INHERIT (to_enable_tracepoint, t);
      INHERIT (to_disable_tracepoint, t);
      INHERIT (to_trace_set_readonly_regions, t);
      INHERIT (to_trace_start, t);
      INHERIT (to_get_trace_status, t);
      INHERIT (to_get_tracepoint_status, t);
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_get_min_fast_tracepoint_insn_len, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_set_trace_buffer_size, t);
      INHERIT (to_set_trace_notes, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_use_agent, t);
      INHERIT (to_can_use_agent, t);
      INHERIT (to_augmented_libraries_svr4_read, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      INHERIT (to_can_run_breakpoint_commands, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that print an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
  if (!current_target.field)               \
    current_target.field = value

  /* The casts below only discard/ignore unused parameter positions;
     the called stubs (tcomplain, target_ignore, return_zero,
     return_minus_one, return_null, nomemory) take no arguments or
     ignore them.  */
  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_can_run,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_stop,
	    (void (*) (struct target_ops *, ptid_t))
	    target_ignore);
  de_fault (to_thread_architecture,
	    default_thread_architecture);
  /* to_read_description is unconditionally reset (not defaulted): a
     NULL value is meaningful to its callers.  */
  current_target.to_read_description = NULL;
  de_fault (to_get_ada_task_ptid,
	    (ptid_t (*) (struct target_ops *, long, long))
	    default_get_ada_task_ptid);
  de_fault (to_supports_multi_process,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_supports_enable_disable_tracepoint,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_supports_string_tracing,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_trace_init,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_download_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_can_download_tracepoint,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_download_trace_state_variable,
	    (void (*) (struct target_ops *, struct trace_state_variable *))
	    tcomplain);
  de_fault (to_enable_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_disable_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_trace_set_readonly_regions,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_trace_start,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_get_trace_status,
	    (int (*) (struct target_ops *, struct trace_status *))
	    return_minus_one);
  de_fault (to_get_tracepoint_status,
	    (void (*) (struct target_ops *, struct breakpoint *,
		       struct uploaded_tp *))
	    tcomplain);
  de_fault (to_trace_stop,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_trace_find,
	    (int (*) (struct target_ops *,
		      enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (struct target_ops *, int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (struct target_ops *, const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct target_ops *, struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct target_ops *, struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (struct target_ops *, gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_get_min_fast_tracepoint_insn_len,
	    (int (*) (struct target_ops *))
	    return_minus_one);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_trace_buffer_size,
	    (void (*) (struct target_ops *, LONGEST))
	    target_ignore);
  de_fault (to_set_trace_notes,
	    (int (*) (struct target_ops *,
		      const char *, const char *, const char *))
	    return_zero);
  de_fault (to_get_tib_address,
	    (int (*) (struct target_ops *, ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (struct target_ops *,
		      CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (struct target_ops *,
						    const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (struct target_ops *))
	    return_null);
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_can_run_breakpoint_commands,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_use_agent,
	    (int (*) (struct target_ops *, int))
	    tcomplain);
  de_fault (to_can_use_agent,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_augmented_libraries_svr4_read,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_execution_direction, default_execution_direction);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
873
874 /* Push a new target type into the stack of the existing target accessors,
875 possibly superseding some of the existing accessors.
876
877 Rather than allow an empty stack, we always have the dummy target at
878 the bottom stratum, so we can call the function vectors without
879 checking them. */
880
void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  CUR is a
     pointer to the link we will splice at, so insertion needs no
     back-pointer.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
         and un-hook it from the stack.  Unhook before closing so the
         close implementation does not see a half-linked stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  /* Re-derive current_target from the updated stack.  */
  update_current_target ();
}
924
925 /* Remove a target_ops vector from the stack, wherever it may be.
926 Return how many times it was removed (0 or 1). */
927
int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target is the permanent bottom of the stack and must
     never be removed.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}
966
967 void
968 pop_all_targets_above (enum strata above_stratum)
969 {
970 while ((int) (current_target.to_stratum) > (int) above_stratum)
971 {
972 if (!unpush_target (target_stack))
973 {
974 fprintf_unfiltered (gdb_stderr,
975 "pop_all_targets couldn't find target %s\n",
976 target_stack->to_shortname);
977 internal_error (__FILE__, __LINE__,
978 _("failed internal consistency check"));
979 break;
980 }
981 }
982 }
983
/* Pop every target except the permanent dummy target at
   dummy_stratum.  */
void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
989
990 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
991
992 int
993 target_is_pushed (struct target_ops *t)
994 {
995 struct target_ops **cur;
996
997 /* Check magic number. If wrong, it probably means someone changed
998 the struct definition, but not all the places that initialize one. */
999 if (t->to_magic != OPS_MAGIC)
1000 {
1001 fprintf_unfiltered (gdb_stderr,
1002 "Magic number of %s target struct wrong\n",
1003 t->to_shortname);
1004 internal_error (__FILE__, __LINE__,
1005 _("failed internal consistency check"));
1006 }
1007
1008 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1009 if (*cur == t)
1010 return 1;
1011
1012 return 0;
1013 }
1014
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  Errors
   out (with a TLS-specific message where possible) if the address
   cannot be determined.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* Volatile: assigned inside TRY_CATCH, read after the possible
     longjmp back out of it.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target beneath the flattened current_target that
     implements to_get_thread_local_address.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
        break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
        {
          CORE_ADDR lm_addr;

          /* Fetch the load module address for this objfile.  */
          lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
                                                           objfile);
          /* If it's 0, throw the appropriate exception.  */
          if (lm_addr == 0)
            throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
                         _("TLS load module not found"));

          addr = target->to_get_thread_local_address (target, ptid,
                                                      lm_addr, offset);
        }
      /* If an error occurred, print TLS related messages here.  Otherwise,
         throw the error to some higher catcher.  */
      if (ex.reason < 0)
        {
          /* Used to pick executable- vs shared-library-flavored
             wording for the error messages below.  */
          int objfile_is_library = (objfile->flags & OBJF_SHARED);

          switch (ex.error)
            {
            case TLS_NO_LIBRARY_SUPPORT_ERROR:
              error (_("Cannot find thread-local variables "
                       "in this thread library."));
              break;
            case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
              if (objfile_is_library)
                error (_("Cannot find shared library `%s' in dynamic"
                         " linker's load module list"), objfile_name (objfile));
              else
                error (_("Cannot find executable file `%s' in dynamic"
                         " linker's load module list"), objfile_name (objfile));
              break;
            case TLS_NOT_ALLOCATED_YET_ERROR:
              if (objfile_is_library)
                error (_("The inferior has not yet allocated storage for"
                         " thread-local variables in\n"
                         "the shared library `%s'\n"
                         "for %s"),
                       objfile_name (objfile), target_pid_to_str (ptid));
              else
                error (_("The inferior has not yet allocated storage for"
                         " thread-local variables in\n"
                         "the executable `%s'\n"
                         "for %s"),
                       objfile_name (objfile), target_pid_to_str (ptid));
              break;
            case TLS_GENERIC_ERROR:
              if (objfile_is_library)
                error (_("Cannot find thread-local storage for %s, "
                         "shared library %s:\n%s"),
                       target_pid_to_str (ptid),
                       objfile_name (objfile), ex.message);
              else
                error (_("Cannot find thread-local storage for %s, "
                         "executable file %s:\n%s"),
                       target_pid_to_str (ptid),
                       objfile_name (objfile), ex.message);
              break;
            default:
              /* Not a TLS-specific error; re-throw for an outer
                 handler.  */
              throw_exception (ex);
              break;
            }
        }
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1111
1112 const char *
1113 target_xfer_status_to_string (enum target_xfer_status err)
1114 {
1115 #define CASE(X) case X: return #X
1116 switch (err)
1117 {
1118 CASE(TARGET_XFER_E_IO);
1119 CASE(TARGET_XFER_E_UNAVAILABLE);
1120 default:
1121 return "<unknown>";
1122 }
1123 #undef CASE
1124 };
1125
1126
1127 #undef MIN
1128 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1129
/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  Memory is fetched in 4-byte aligned chunks; on a read failure
   the transfer is retried a single byte at a time before giving up.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;               /* Growable result buffer (doubles on demand).  */
  int buffer_allocated;       /* Current capacity of BUFFER.  */
  char *bufptr;               /* Next free slot in BUFFER.  */
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read up to the next 4-byte boundary; OFFSET is where MEMADDR
         sits inside the aligned 4-byte chunk we fetch.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
        {
          /* The transfer request might have crossed the boundary to an
             unallocated region of memory.  Retry the transfer, requesting
             a single byte.  */
          tlen = 1;
          offset = 0;
          errcode = target_read_memory (memaddr, buf, 1);
          if (errcode != 0)
            goto done;
        }

      /* Grow the result buffer if this chunk would overflow it.  */
      if (bufptr - buffer + tlen > buffer_allocated)
        {
          unsigned int bytes;

          bytes = bufptr - buffer;
          buffer_allocated *= 2;
          buffer = xrealloc (buffer, buffer_allocated);
          bufptr = buffer + bytes;
        }

      /* Copy bytes out, stopping (and counting the NUL) at the
         terminator.  */
      for (i = 0; i < tlen; i++)
        {
          *bufptr++ = buf[i + offset];
          if (buf[i + offset] == '\000')
            {
              nbytes_read += i + 1;
              goto done;
            }
        }

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
done:
  /* BUFFER is returned even on error, so the caller always owns it.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1202
1203 struct target_section_table *
1204 target_get_section_table (struct target_ops *target)
1205 {
1206 struct target_ops *t;
1207
1208 if (targetdebug)
1209 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1210
1211 for (t = target; t != NULL; t = t->beneath)
1212 if (t->to_get_section_table != NULL)
1213 return (*t->to_get_section_table) (t);
1214
1215 return NULL;
1216 }
1217
1218 /* Find a section containing ADDR. */
1219
1220 struct target_section *
1221 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1222 {
1223 struct target_section_table *table = target_get_section_table (target);
1224 struct target_section *secp;
1225
1226 if (table == NULL)
1227 return NULL;
1228
1229 for (secp = table->sections; secp < table->sections_end; secp++)
1230 {
1231 if (addr >= secp->addr && addr < secp->endaddr)
1232 return secp;
1233 }
1234 return NULL;
1235 }
1236
/* Read memory from the live target, even if currently inspecting a
   traceframe.  The return is the same as that of target_read.  */

static enum target_xfer_status
target_read_live_memory (enum target_object object,
                         ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
                         ULONGEST *xfered_len)
{
  enum target_xfer_status ret;
  struct cleanup *cleanup;

  /* Switch momentarily out of tfind mode so to access live memory.
     Note that this must not clear global state, such as the frame
     cache, which must still remain valid for the previous traceframe.
     We may be _building_ the frame cache at this point.  */
  cleanup = make_cleanup_restore_traceframe_number ();
  set_traceframe_number (-1);

  /* Dispatch from beneath the flattened current_target; see the
     comment in target_read_memory.  */
  ret = target_xfer_partial (current_target.beneath, object, NULL,
                             myaddr, NULL, memaddr, len, xfered_len);

  /* Restore the selected traceframe number before returning.  */
  do_cleanups (cleanup);
  return ret;
}
1261
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
                                   enum target_object object,
                                   gdb_byte *readbuf, ULONGEST memaddr,
                                   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed if MEMADDR falls in a section flagged SEC_READONLY.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
                                 secp->the_bfd_section)
          & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      /* Find the section containing MEMADDR and clip the request to
         its extent before reading from live memory.  */
      for (p = table->sections; p < table->sections_end; p++)
        {
          if (memaddr >= p->addr)
            {
              if (memend <= p->endaddr)
                {
                  /* Entire transfer is within this section.  */
                  return target_read_live_memory (object, memaddr,
                                                  readbuf, len, xfered_len);
                }
              else if (memaddr >= p->endaddr)
                {
                  /* This section ends before the transfer starts.  */
                  continue;
                }
              else
                {
                  /* This section overlaps the transfer.  Just do half.  */
                  len = p->endaddr - memaddr;
                  return target_read_live_memory (object, memaddr,
                                                  readbuf, len, xfered_len);
                }
            }
        }
    }

  /* MEMADDR is not in a known read-only section: nothing transferred.  */
  return TARGET_XFER_EOF;
}
1317
1318 /* Read memory from more than one valid target. A core file, for
1319 instance, could have some of memory but delegate other bits to
1320 the target below it. So, we must manually try all targets. */
1321
1322 static enum target_xfer_status
1323 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1324 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1325 ULONGEST *xfered_len)
1326 {
1327 enum target_xfer_status res;
1328
1329 do
1330 {
1331 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1332 readbuf, writebuf, memaddr, len,
1333 xfered_len);
1334 if (res == TARGET_XFER_OK)
1335 break;
1336
1337 /* Stop if the target reports that the memory is not available. */
1338 if (res == TARGET_XFER_E_UNAVAILABLE)
1339 break;
1340
1341 /* We want to continue past core files to executables, but not
1342 past a running target's memory. */
1343 if (ops->to_has_all_memory (ops))
1344 break;
1345
1346 ops = ops->beneath;
1347 }
1348 while (ops != NULL);
1349
1350 return res;
1351 }
1352
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   Tries, in order: unmapped-overlay file sections, trusted read-only
   executable sections, traceframe-available memory (falling back to
   live read-only memory), the dcache, and finally a raw transfer
   through the target stack.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
                       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
                       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  int reg_len;                  /* LEN clipped to the memory region's bound.  */
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
        {
          struct target_section_table *table
            = target_get_section_table (ops);
          const char *section_name = section->the_bfd_section->name;

          memaddr = overlay_mapped_address (memaddr, section);
          return section_table_xfer_memory_partial (readbuf, writebuf,
                                                    memaddr, len, xfered_len,
                                                    table->sections,
                                                    table->sections_end,
                                                    section_name);
        }
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
          && (bfd_get_section_flags (secp->the_bfd_section->owner,
                                     secp->the_bfd_section)
              & SEC_READONLY))
        {
          table = target_get_section_table (ops);
          return section_table_xfer_memory_partial (readbuf, writebuf,
                                                    memaddr, len, xfered_len,
                                                    table->sections,
                                                    table->sections_end,
                                                    NULL);
        }
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
         target does not support querying traceframe info, and so we
         attempt reading from the traceframe anyway (assuming the
         target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
        {
          struct cleanup *old_chain;

          old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

          if (VEC_empty (mem_range_s, available)
              || VEC_index (mem_range_s, available, 0)->start != memaddr)
            {
              /* Don't read into the traceframe's available
                 memory.  */
              if (!VEC_empty (mem_range_s, available))
                {
                  LONGEST oldlen = len;

                  /* Clip the request to end where available memory
                     begins.  */
                  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
                  gdb_assert (len <= oldlen);
                }

              do_cleanups (old_chain);

              /* This goes through the topmost target again.  */
              res = memory_xfer_live_readonly_partial (ops, object,
                                                       readbuf, memaddr,
                                                       len, xfered_len);
              if (res == TARGET_XFER_OK)
                return TARGET_XFER_OK;
              else
                {
                  /* No use trying further, we know some memory starting
                     at MEMADDR isn't available.  */
                  *xfered_len = len;
                  return TARGET_XFER_E_UNAVAILABLE;
                }
            }

          /* Don't try to read more than how much is available, in
             case the target implements the deprecated QTro packet to
             cater for older GDBs (the target's knowledge of read-only
             sections may be outdated by now).  */
          len = VEC_index (mem_range_s, available, 0)->length;

          do_cleanups (old_chain);
        }
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Honor the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
        return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
        return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
        error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
         with reading from a trace buffer, because reading outside of
         the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
          || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
          || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
        l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
        /* FIXME drow/2006-08-09: If we're going to preserve const
           correctness dcache_xfer_memory should take readbuf and
           writebuf.  */
        l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
                                reg_len, 1);
      if (l <= 0)
        return TARGET_XFER_E_IO;
      else
        {
          *xfered_len = (ULONGEST) l;
          return TARGET_XFER_OK;
        }
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
                                 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
          || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1563
/* Perform a partial memory transfer.  For docs see target.h,
   to_xfer_partial.  Wraps memory_xfer_partial_1 with breakpoint
   shadow handling: reads get breakpoint shadows folded back in,
   writes get breakpoint instructions re-inserted.  */

static enum target_xfer_status
memory_xfer_partial (struct target_ops *ops, enum target_object object,
                     gdb_byte *readbuf, const gdb_byte *writebuf,
                     ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;

  /* Zero length requests are ok and require no work.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
     breakpoint insns, thus hiding out from higher layers whether
     there are software breakpoints inserted in the code stream.  */
  if (readbuf != NULL)
    {
      res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
                                   xfered_len);

      if (res == TARGET_XFER_OK && !show_memory_breakpoints)
        breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
    }
  else
    {
      void *buf;
      struct cleanup *old_chain;

      /* A large write request is likely to be partially satisfied
         by memory_xfer_partial_1.  We will continually malloc
         and free a copy of the entire write request for breakpoint
         shadow handling even though we only end up writing a small
         subset of it.  Cap writes to 4KB to mitigate this.  */
      len = min (4096, len);

      /* Write through a private copy so breakpoint insns can be
         patched in without touching the caller's buffer.  */
      buf = xmalloc (len);
      old_chain = make_cleanup (xfree, buf);
      memcpy (buf, writebuf, len);

      breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
      res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
                                   xfered_len);

      do_cleanups (old_chain);
    }

  return res;
}
1614
/* Cleanup callback: restore the show_memory_breakpoints global from
   the value smuggled through ARG as a uintptr_t.  */
static void
restore_show_memory_breakpoints (void *arg)
{
  show_memory_breakpoints = (uintptr_t) arg;
}
1620
/* Temporarily set show_memory_breakpoints to SHOW, returning a
   cleanup that restores the previous value when run.  */
struct cleanup *
make_show_memory_breakpoints_cleanup (int show)
{
  int current = show_memory_breakpoints;

  show_memory_breakpoints = show;
  /* Pack the saved value into the cleanup's closure pointer.  */
  return make_cleanup (restore_show_memory_breakpoints,
                       (void *) (uintptr_t) current);
}
1630
/* For docs see target.h, to_xfer_partial.  Dispatches to the
   memory-specific paths for memory objects, otherwise straight to
   OPS's to_xfer_partial; also handles the "may-write-memory" guard
   and targetdebug tracing.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
                     enum target_object object, const char *annex,
                     gdb_byte *readbuf, const gdb_byte *writebuf,
                     ULONGEST offset, ULONGEST len,
                     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
           core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
                                  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
                                        xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
                                   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
                          "%s:target_xfer_partial "
                          "(%d, %s, %s, %s, %s, %s) = %d, %s",
                          ops->to_shortname,
                          (int) object,
                          (annex ? annex : "(null)"),
                          host_address_to_string (readbuf),
                          host_address_to_string (writebuf),
                          core_addr_to_string_nz (offset),
                          pulongest (len), retval,
                          pulongest (*xfered_len));

      if (readbuf)
        myaddr = readbuf;
      if (writebuf)
        myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
        {
          int i;

          /* Dump the transferred bytes, 16 per line; unless
             targetdebug >= 2, truncate after the first line.  */
          fputs_unfiltered (", bytes =", gdb_stdlog);
          for (i = 0; i < *xfered_len; i++)
            {
              if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
                {
                  if (targetdebug < 2 && i > 0)
                    {
                      fprintf_unfiltered (gdb_stdlog, " ...");
                      break;
                    }
                  fprintf_unfiltered (gdb_stdlog, "\n");
                }

              fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
            }
        }

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1723
1724 /* Read LEN bytes of target memory at address MEMADDR, placing the
1725 results in GDB's memory at MYADDR. Returns either 0 for success or
1726 TARGET_XFER_E_IO if any error occurs.
1727
1728 If an error occurs, no guarantee is made about the contents of the data at
1729 MYADDR. In particular, the caller should not depend upon partial reads
1730 filling the buffer with good data. There is no way for the caller to know
1731 how much good data might have been transfered anyway. Callers that can
1732 deal with partial reads should call target_read (which will retry until
1733 it makes no progress, and then return how much was transferred). */
1734
1735 int
1736 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1737 {
1738 /* Dispatch to the topmost target, not the flattened current_target.
1739 Memory accesses check target->to_has_(all_)memory, and the
1740 flattened target doesn't inherit those. */
1741 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1742 myaddr, memaddr, len) == len)
1743 return 0;
1744 else
1745 return TARGET_XFER_E_IO;
1746 }
1747
1748 /* Like target_read_memory, but specify explicitly that this is a read
1749 from the target's raw memory. That is, this read bypasses the
1750 dcache, breakpoint shadowing, etc. */
1751
1752 int
1753 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1754 {
1755 /* See comment in target_read_memory about why the request starts at
1756 current_target.beneath. */
1757 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1758 myaddr, memaddr, len) == len)
1759 return 0;
1760 else
1761 return TARGET_XFER_E_IO;
1762 }
1763
1764 /* Like target_read_memory, but specify explicitly that this is a read from
1765 the target's stack. This may trigger different cache behavior. */
1766
1767 int
1768 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1769 {
1770 /* See comment in target_read_memory about why the request starts at
1771 current_target.beneath. */
1772 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1773 myaddr, memaddr, len) == len)
1774 return 0;
1775 else
1776 return TARGET_XFER_E_IO;
1777 }
1778
1779 /* Like target_read_memory, but specify explicitly that this is a read from
1780 the target's code. This may trigger different cache behavior. */
1781
1782 int
1783 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1784 {
1785 /* See comment in target_read_memory about why the request starts at
1786 current_target.beneath. */
1787 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1788 myaddr, memaddr, len) == len)
1789 return 0;
1790 else
1791 return TARGET_XFER_E_IO;
1792 }
1793
1794 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1795 Returns either 0 for success or TARGET_XFER_E_IO if any
1796 error occurs. If an error occurs, no guarantee is made about how
1797 much data got written. Callers that can deal with partial writes
1798 should call target_write. */
1799
1800 int
1801 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1802 {
1803 /* See comment in target_read_memory about why the request starts at
1804 current_target.beneath. */
1805 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1806 myaddr, memaddr, len) == len)
1807 return 0;
1808 else
1809 return TARGET_XFER_E_IO;
1810 }
1811
1812 /* Write LEN bytes from MYADDR to target raw memory at address
1813 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1814 if any error occurs. If an error occurs, no guarantee is made
1815 about how much data got written. Callers that can deal with
1816 partial writes should call target_write. */
1817
1818 int
1819 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1820 {
1821 /* See comment in target_read_memory about why the request starts at
1822 current_target.beneath. */
1823 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1824 myaddr, memaddr, len) == len)
1825 return 0;
1826 else
1827 return TARGET_XFER_E_IO;
1828 }
1829
/* Fetch the target's memory map.  Returns a vector of regions sorted
   by address and numbered for the "mem" commands, or NULL if no
   target provides a map or the map contains overlapping regions.
   Caller owns the returned vector.  */

VEC(mem_region_s) *
target_memory_map (void)
{
  VEC(mem_region_s) *result;
  struct mem_region *last_one, *this_one;
  int ix;
  struct target_ops *t;

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");

  /* Find the first target beneath us that implements to_memory_map.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_memory_map != NULL)
      break;

  if (t == NULL)
    return NULL;

  result = t->to_memory_map (t);
  if (result == NULL)
    return NULL;

  /* Sort regions by address so overlap checking below is a simple
     adjacent-pair comparison.  */
  qsort (VEC_address (mem_region_s, result),
         VEC_length (mem_region_s, result),
         sizeof (struct mem_region), mem_region_cmp);

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  last_one = NULL;
  for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
    {
      this_one->number = ix;

      if (last_one && last_one->hi > this_one->lo)
        {
          warning (_("Overlapping regions in memory map: ignoring"));
          VEC_free (mem_region_s, result);
          return NULL;
        }
      last_one = this_one;
    }

  return result;
}
1877
1878 void
1879 target_flash_erase (ULONGEST address, LONGEST length)
1880 {
1881 struct target_ops *t;
1882
1883 for (t = current_target.beneath; t != NULL; t = t->beneath)
1884 if (t->to_flash_erase != NULL)
1885 {
1886 if (targetdebug)
1887 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1888 hex_string (address), phex (length, 0));
1889 t->to_flash_erase (t, address, length);
1890 return;
1891 }
1892
1893 tcomplain ();
1894 }
1895
1896 void
1897 target_flash_done (void)
1898 {
1899 struct target_ops *t;
1900
1901 for (t = current_target.beneath; t != NULL; t = t->beneath)
1902 if (t->to_flash_done != NULL)
1903 {
1904 if (targetdebug)
1905 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1906 t->to_flash_done (t);
1907 return;
1908 }
1909
1910 tcomplain ();
1911 }
1912
/* "show" callback for the "trust-readonly-sections" setting.  */
static void
show_trust_readonly (struct ui_file *file, int from_tty,
                     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Mode for reading from readonly sections is %s.\n"),
                    value);
}
1921
1922 /* More generic transfers. */
1923
1924 static enum target_xfer_status
1925 default_xfer_partial (struct target_ops *ops, enum target_object object,
1926 const char *annex, gdb_byte *readbuf,
1927 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1928 ULONGEST *xfered_len)
1929 {
1930 if (object == TARGET_OBJECT_MEMORY
1931 && ops->deprecated_xfer_memory != NULL)
1932 /* If available, fall back to the target's
1933 "deprecated_xfer_memory" method. */
1934 {
1935 int xfered = -1;
1936
1937 errno = 0;
1938 if (writebuf != NULL)
1939 {
1940 void *buffer = xmalloc (len);
1941 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1942
1943 memcpy (buffer, writebuf, len);
1944 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1945 1/*write*/, NULL, ops);
1946 do_cleanups (cleanup);
1947 }
1948 if (readbuf != NULL)
1949 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1950 0/*read*/, NULL, ops);
1951 if (xfered > 0)
1952 {
1953 *xfered_len = (ULONGEST) xfered;
1954 return TARGET_XFER_E_IO;
1955 }
1956 else if (xfered == 0 && errno == 0)
1957 /* "deprecated_xfer_memory" uses 0, cross checked against
1958 ERRNO as one indication of an error. */
1959 return TARGET_XFER_EOF;
1960 else
1961 return TARGET_XFER_E_IO;
1962 }
1963 else
1964 {
1965 gdb_assert (ops->beneath != NULL);
1966 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1967 readbuf, writebuf, offset, len,
1968 xfered_len);
1969 }
1970 }
1971
/* Target vector read/write partial wrapper functions.  */

/* Read-only convenience wrapper around target_xfer_partial (no write
   buffer).  */
static enum target_xfer_status
target_read_partial (struct target_ops *ops,
                     enum target_object object,
                     const char *annex, gdb_byte *buf,
                     ULONGEST offset, ULONGEST len,
                     ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
                              xfered_len);
}
1984
/* Write-only convenience wrapper around target_xfer_partial (no read
   buffer).  */
static enum target_xfer_status
target_write_partial (struct target_ops *ops,
                      enum target_object object,
                      const char *annex, const gdb_byte *buf,
                      ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
                              xfered_len);
}
1994
1995 /* Wrappers to perform the full transfer. */
1996
1997 /* For docs on target_read see target.h. */
1998
1999 LONGEST
2000 target_read (struct target_ops *ops,
2001 enum target_object object,
2002 const char *annex, gdb_byte *buf,
2003 ULONGEST offset, LONGEST len)
2004 {
2005 LONGEST xfered = 0;
2006
2007 while (xfered < len)
2008 {
2009 ULONGEST xfered_len;
2010 enum target_xfer_status status;
2011
2012 status = target_read_partial (ops, object, annex,
2013 (gdb_byte *) buf + xfered,
2014 offset + xfered, len - xfered,
2015 &xfered_len);
2016
2017 /* Call an observer, notifying them of the xfer progress? */
2018 if (status == TARGET_XFER_EOF)
2019 return xfered;
2020 else if (status == TARGET_XFER_OK)
2021 {
2022 xfered += xfered_len;
2023 QUIT;
2024 }
2025 else
2026 return -1;
2027
2028 }
2029 return len;
2030 }
2031
2032 /* Assuming that the entire [begin, end) range of memory cannot be
2033 read, try to read whatever subrange is possible to read.
2034
2035 The function returns, in RESULT, either zero or one memory block.
2036 If there's a readable subrange at the beginning, it is completely
2037 read and returned. Any further readable subrange will not be read.
2038 Otherwise, if there's a readable subrange at the end, it will be
2039 completely read and returned. Any readable subranges before it
2040 (obviously, not starting at the beginning), will be ignored. In
2041 other cases -- either no readable subrange, or readable subrange(s)
2042 that is neither at the beginning, or end, nothing is returned.
2043
2044 The purpose of this function is to handle a read across a boundary
2045 of accessible memory in a case when memory map is not available.
2046 The above restrictions are fine for this case, but will give
2047 incorrect results if the memory is 'patchy'. However, supporting
2048 'patchy' memory would require trying to read every single byte,
2049 and it seems unacceptable solution. Explicit memory map is
2050 recommended for this case -- and target_read_memory_robust will
2051 take care of reading multiple ranges then. */
2052
static void
read_whatever_is_readable (struct target_ops *ops,
                           ULONGEST begin, ULONGEST end,
                           VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;			/* 1: readable prefix; 0: readable suffix.  */
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                           buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                                buf + (end-begin) - 1, end - 1, 1,
                                &xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      /* Neither boundary byte is readable; nothing to report.  */
      xfree (buf);
      return;
    }

  /* Binary-search for the boundary between the readable and unreadable
     parts.

     Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* The "first half" is always the half adjacent to the part
	 already known to be readable.  */
      if (forward)
        {
          first_half_begin = current_begin;
          first_half_end = middle;
          second_half_begin = middle;
          second_half_end = current_end;
        }
      else
        {
          first_half_begin = middle;
          first_half_end = current_end;
          second_half_begin = current_begin;
          second_half_end = middle;
        }

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                          buf + (first_half_begin - begin),
                          first_half_begin,
                          first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
        {
          /* This half reads up fine.  So, the error must be in the
             other half.  */
          current_begin = second_half_begin;
          current_end = second_half_end;
        }
      else
        {
          /* This half is not readable.  Because we've tried one byte, we
             know some part of this half is actually readable.  Go to the
             next iteration to divide again and try to read.

             We don't handle the other half, because this function only
             tries to read a single readable subrange.  */
          current_begin = first_half_begin;
          current_end = first_half_end;
        }
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read; BUF already
	 holds it at the front, so hand BUF to the result directly.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Only the tail of
	 BUF is valid, so copy it out to a right-sized buffer.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2166
2167 void
2168 free_memory_read_result_vector (void *x)
2169 {
2170 VEC(memory_read_result_s) *v = x;
2171 memory_read_result_s *current;
2172 int ix;
2173
2174 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2175 {
2176 xfree (current->data);
2177 }
2178 VEC_free (memory_read_result_s, v);
2179 }
2180
2181 VEC(memory_read_result_s) *
2182 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2183 {
2184 VEC(memory_read_result_s) *result = 0;
2185
2186 LONGEST xfered = 0;
2187 while (xfered < len)
2188 {
2189 struct mem_region *region = lookup_mem_region (offset + xfered);
2190 LONGEST rlen;
2191
2192 /* If there is no explicit region, a fake one should be created. */
2193 gdb_assert (region);
2194
2195 if (region->hi == 0)
2196 rlen = len - xfered;
2197 else
2198 rlen = region->hi - offset;
2199
2200 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2201 {
2202 /* Cannot read this region. Note that we can end up here only
2203 if the region is explicitly marked inaccessible, or
2204 'inaccessible-by-default' is in effect. */
2205 xfered += rlen;
2206 }
2207 else
2208 {
2209 LONGEST to_read = min (len - xfered, rlen);
2210 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2211
2212 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2213 (gdb_byte *) buffer,
2214 offset + xfered, to_read);
2215 /* Call an observer, notifying them of the xfer progress? */
2216 if (xfer <= 0)
2217 {
2218 /* Got an error reading full chunk. See if maybe we can read
2219 some subrange. */
2220 xfree (buffer);
2221 read_whatever_is_readable (ops, offset + xfered,
2222 offset + xfered + to_read, &result);
2223 xfered += to_read;
2224 }
2225 else
2226 {
2227 struct memory_read_result r;
2228 r.data = buffer;
2229 r.begin = offset + xfered;
2230 r.end = r.begin + xfer;
2231 VEC_safe_push (memory_read_result_s, result, &r);
2232 xfered += xfer;
2233 }
2234 QUIT;
2235 }
2236 }
2237 return result;
2238 }
2239
2240
2241 /* An alternative to target_write with progress callbacks. */
2242
2243 LONGEST
2244 target_write_with_progress (struct target_ops *ops,
2245 enum target_object object,
2246 const char *annex, const gdb_byte *buf,
2247 ULONGEST offset, LONGEST len,
2248 void (*progress) (ULONGEST, void *), void *baton)
2249 {
2250 LONGEST xfered = 0;
2251
2252 /* Give the progress callback a chance to set up. */
2253 if (progress)
2254 (*progress) (0, baton);
2255
2256 while (xfered < len)
2257 {
2258 ULONGEST xfered_len;
2259 enum target_xfer_status status;
2260
2261 status = target_write_partial (ops, object, annex,
2262 (gdb_byte *) buf + xfered,
2263 offset + xfered, len - xfered,
2264 &xfered_len);
2265
2266 if (status == TARGET_XFER_EOF)
2267 return xfered;
2268 if (TARGET_XFER_STATUS_ERROR_P (status))
2269 return -1;
2270
2271 gdb_assert (status == TARGET_XFER_OK);
2272 if (progress)
2273 (*progress) (xfered_len, baton);
2274
2275 xfered += xfered_len;
2276 QUIT;
2277 }
2278 return len;
2279 }
2280
2281 /* For docs on target_write see target.h. */
2282
2283 LONGEST
2284 target_write (struct target_ops *ops,
2285 enum target_object object,
2286 const char *annex, const gdb_byte *buf,
2287 ULONGEST offset, LONGEST len)
2288 {
2289 return target_write_with_progress (ops, object, annex, buf, offset, len,
2290 NULL, NULL);
2291 }
2292
2293 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2294 the size of the transferred data. PADDING additional bytes are
2295 available in *BUF_P. This is a helper function for
2296 target_read_alloc; see the declaration of that function for more
2297 information. */
2298
static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
                     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;	/* Current capacity and fill level.  */
  gdb_byte *buf;

  /* This function does not have a length parameter; it reads the
     entire OBJECT).  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      /* Always keep PADDING bytes of headroom at the end of the buffer
	 for the caller (e.g. for a trailing NUL).  */
      status = target_read_partial (ops, object, annex, &buf[buf_pos],
                                    buf_pos, buf_alloc - buf_pos - padding,
                                    &xfered_len);

      if (status == TARGET_XFER_EOF)
        {
          /* Read all there was.  Note that when nothing at all was
	     read, BUF is freed and *BUF_P is left unset.  */
          if (buf_pos == 0)
            xfree (buf);
          else
            *buf_p = buf;
          return buf_pos;
        }
      else if (status != TARGET_XFER_OK)
        {
          /* An error occurred.  */
          xfree (buf);
          return TARGET_XFER_E_IO;
        }

      buf_pos += xfered_len;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
        {
          buf_alloc *= 2;
          buf = xrealloc (buf, buf_alloc);
        }

      QUIT;
    }
}
2355
2356 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2357 the size of the transferred data. See the declaration in "target.h"
2358 function for more information about the return value. */
2359
2360 LONGEST
2361 target_read_alloc (struct target_ops *ops, enum target_object object,
2362 const char *annex, gdb_byte **buf_p)
2363 {
2364 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2365 }
2366
2367 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2368 returned as a string, allocated using xmalloc. If an error occurs
2369 or the transfer is unsupported, NULL is returned. Empty objects
2370 are returned as allocated but empty strings. A warning is issued
2371 if the result contains any embedded NUL bytes. */
2372
2373 char *
2374 target_read_stralloc (struct target_ops *ops, enum target_object object,
2375 const char *annex)
2376 {
2377 gdb_byte *buffer;
2378 char *bufstr;
2379 LONGEST i, transferred;
2380
2381 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2382 bufstr = (char *) buffer;
2383
2384 if (transferred < 0)
2385 return NULL;
2386
2387 if (transferred == 0)
2388 return xstrdup ("");
2389
2390 bufstr[transferred] = 0;
2391
2392 /* Check for embedded NUL bytes; but allow trailing NULs. */
2393 for (i = strlen (bufstr); i < transferred; i++)
2394 if (bufstr[i] != 0)
2395 {
2396 warning (_("target object %d, annex %s, "
2397 "contained unexpected null characters"),
2398 (int) object, annex ? annex : "(none)");
2399 break;
2400 }
2401
2402 return bufstr;
2403 }
2404
2405 /* Memory transfer methods. */
2406
2407 void
2408 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2409 LONGEST len)
2410 {
2411 /* This method is used to read from an alternate, non-current
2412 target. This read must bypass the overlay support (as symbols
2413 don't match this target), and GDB's internal cache (wrong cache
2414 for this target). */
2415 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2416 != len)
2417 memory_error (TARGET_XFER_E_IO, addr);
2418 }
2419
2420 ULONGEST
2421 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2422 int len, enum bfd_endian byte_order)
2423 {
2424 gdb_byte buf[sizeof (ULONGEST)];
2425
2426 gdb_assert (len <= sizeof (buf));
2427 get_target_memory (ops, addr, buf, len);
2428 return extract_unsigned_integer (buf, len, byte_order);
2429 }
2430
2431 /* See target.h. */
2432
2433 int
2434 target_insert_breakpoint (struct gdbarch *gdbarch,
2435 struct bp_target_info *bp_tgt)
2436 {
2437 if (!may_insert_breakpoints)
2438 {
2439 warning (_("May not insert breakpoints"));
2440 return 1;
2441 }
2442
2443 return current_target.to_insert_breakpoint (&current_target,
2444 gdbarch, bp_tgt);
2445 }
2446
2447 /* See target.h. */
2448
2449 int
2450 target_remove_breakpoint (struct gdbarch *gdbarch,
2451 struct bp_target_info *bp_tgt)
2452 {
2453 /* This is kind of a weird case to handle, but the permission might
2454 have been changed after breakpoints were inserted - in which case
2455 we should just take the user literally and assume that any
2456 breakpoints should be left in place. */
2457 if (!may_insert_breakpoints)
2458 {
2459 warning (_("May not remove breakpoints"));
2460 return 1;
2461 }
2462
2463 return current_target.to_remove_breakpoint (&current_target,
2464 gdbarch, bp_tgt);
2465 }
2466
/* Describe the current target stack: which symbols are loaded and what
   each memory-providing target contributes.  (Command callback; the
   registration site is not visible in this chunk.)  */

static void
target_info (char *args, int from_tty)
{
  struct target_ops *t;
  int has_all_mem = 0;	/* Did the previous target claim all memory?  */

  if (symfile_objfile != NULL)
    printf_unfiltered (_("Symbols from \"%s\".\n"),
                       objfile_name (symfile_objfile));

  /* Walk the stack top-down, printing each target that has memory.  */
  for (t = target_stack; t != NULL; t = t->beneath)
    {
      if (!(*t->to_has_memory) (t))
        continue;

      /* Skip the dummy target and anything below it.  */
      if ((int) (t->to_stratum) <= (int) dummy_stratum)
        continue;
      if (has_all_mem)
        printf_unfiltered (_("\tWhile running this, "
                             "GDB does not access memory from...\n"));
      printf_unfiltered ("%s:\n", t->to_longname);
      (t->to_files_info) (t);
      has_all_mem = (*t->to_has_all_memory) (t);
    }
}
2492
2493 /* This function is called before any new inferior is created, e.g.
2494 by running a program, attaching, or connecting to a target.
2495 It cleans up any state from previous invocations which might
2496 change between runs. This is a subset of what target_preopen
2497 resets (things which might change between targets). */
2498
void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  */
  if (!gdbarch_has_global_solist (target_gdbarch ()))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* Also discard any cached in-process agent capability.  */
  agent_capability_invalidate ();
}
2533
2534 /* Callback for iterate_over_inferiors. Gets rid of the given
2535 inferior. */
2536
2537 static int
2538 dispose_inferior (struct inferior *inf, void *args)
2539 {
2540 struct thread_info *thread;
2541
2542 thread = any_thread_of_process (inf->pid);
2543 if (thread)
2544 {
2545 switch_to_thread (thread->ptid);
2546
2547 /* Core inferiors actually should be detached, not killed. */
2548 if (target_has_execution)
2549 target_kill ();
2550 else
2551 target_detach (NULL, 0);
2552 }
2553
2554 return 0;
2555 }
2556
2557 /* This is to be called by the open routine before it does
2558 anything. */
2559
void
target_preopen (int from_tty)
{
  /* A "target" command should not be repeated by an empty line.  */
  dont_repeat ();

  /* Get rid of any existing inferiors -- asking first when we have a
     live one and a terminal to ask on.  */
  if (have_inferiors ())
    {
      if (!from_tty
          || !have_live_inferiors ()
          || query (_("A program is being debugged already.  Kill it? ")))
        iterate_over_inferiors (dispose_inferior, NULL);
      else
        error (_("Program not killed."));
    }

  /* Calling target_kill may remove the target from the stack.  But if
     it doesn't (which seems like a win for UDI), remove it now.  */
  /* Leave the exec target, though.  The user may be switching from a
     live process to a core of the same program.  */
  pop_all_targets_above (file_stratum);

  target_pre_inferior (from_tty);
}
2583
2584 /* Detach a target after doing deferred register stores. */
2585
2586 void
2587 target_detach (const char *args, int from_tty)
2588 {
2589 struct target_ops* t;
2590
2591 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2592 /* Don't remove global breakpoints here. They're removed on
2593 disconnection from the target. */
2594 ;
2595 else
2596 /* If we're in breakpoints-always-inserted mode, have to remove
2597 them before detaching. */
2598 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2599
2600 prepare_for_detach ();
2601
2602 current_target.to_detach (&current_target, args, from_tty);
2603 if (targetdebug)
2604 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2605 args, from_tty);
2606 }
2607
/* Disconnect from the current target without detaching or killing it.
   Delegates to the first target on the stack implementing
   to_disconnect; complains (tcomplain) if none does.  */

void
target_disconnect (char *args, int from_tty)
{
  struct target_ops *t;

  /* If we're in breakpoints-always-inserted mode or if breakpoints
     are global across processes, we have to remove them before
     disconnecting.  */
  remove_breakpoints ();

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_disconnect != NULL)
      {
        if (targetdebug)
          fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
                              args, from_tty);
        t->to_disconnect (t, args, from_tty);
        return;
      }

  tcomplain ();
}
2630
2631 ptid_t
2632 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2633 {
2634 struct target_ops *t;
2635 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2636 status, options);
2637
2638 if (targetdebug)
2639 {
2640 char *status_string;
2641 char *options_string;
2642
2643 status_string = target_waitstatus_to_string (status);
2644 options_string = target_options_to_string (options);
2645 fprintf_unfiltered (gdb_stdlog,
2646 "target_wait (%d, status, options={%s})"
2647 " = %d, %s\n",
2648 ptid_get_pid (ptid), options_string,
2649 ptid_get_pid (retval), status_string);
2650 xfree (status_string);
2651 xfree (options_string);
2652 }
2653
2654 return retval;
2655 }
2656
2657 char *
2658 target_pid_to_str (ptid_t ptid)
2659 {
2660 struct target_ops *t;
2661
2662 for (t = current_target.beneath; t != NULL; t = t->beneath)
2663 {
2664 if (t->to_pid_to_str != NULL)
2665 return (*t->to_pid_to_str) (t, ptid);
2666 }
2667
2668 return normal_pid_to_str (ptid);
2669 }
2670
2671 char *
2672 target_thread_name (struct thread_info *info)
2673 {
2674 return current_target.to_thread_name (&current_target, info);
2675 }
2676
2677 void
2678 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2679 {
2680 struct target_ops *t;
2681
2682 target_dcache_invalidate ();
2683
2684 current_target.to_resume (&current_target, ptid, step, signal);
2685 if (targetdebug)
2686 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2687 ptid_get_pid (ptid),
2688 step ? "step" : "continue",
2689 gdb_signal_to_name (signal));
2690
2691 registers_changed_ptid (ptid);
2692 set_executing (ptid, 1);
2693 set_running (ptid, 1);
2694 clear_inline_frame_state (ptid);
2695 }
2696
/* Pass the PASS_SIGNALS array (NUMSIGS boolean flags indexed by signal
   number) down to the first target implementing to_pass_signals.
   Silently does nothing if no target implements it.  */

void
target_pass_signals (int numsigs, unsigned char *pass_signals)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_pass_signals != NULL)
        {
          if (targetdebug)
            {
              int i;

              fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
                                  numsigs);

              for (i = 0; i < numsigs; i++)
                if (pass_signals[i])
                  fprintf_unfiltered (gdb_stdlog, " %s",
                                      gdb_signal_to_name (i));

              fprintf_unfiltered (gdb_stdlog, " })\n");
            }

          (*t->to_pass_signals) (t, numsigs, pass_signals);
          return;
        }
    }
}
2726
/* Pass the PROGRAM_SIGNALS array (NUMSIGS boolean flags indexed by
   signal number) down to the first target implementing
   to_program_signals.  Structure parallels target_pass_signals above;
   see target.h for the semantic difference between the two lists.  */

void
target_program_signals (int numsigs, unsigned char *program_signals)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_program_signals != NULL)
        {
          if (targetdebug)
            {
              int i;

              fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
                                  numsigs);

              for (i = 0; i < numsigs; i++)
                if (program_signals[i])
                  fprintf_unfiltered (gdb_stdlog, " %s",
                                      gdb_signal_to_name (i));

              fprintf_unfiltered (gdb_stdlog, " })\n");
            }

          (*t->to_program_signals) (t, numsigs, program_signals);
          return;
        }
    }
}
2756
2757 /* Look through the list of possible targets for a target that can
2758 follow forks. */
2759
int
target_follow_fork (int follow_child, int detach_fork)
{
  struct target_ops *t;

  /* Delegate to the first target on the stack that knows how to
     follow a fork.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_follow_fork != NULL)
        {
          int retval = t->to_follow_fork (t, follow_child, detach_fork);

          if (targetdebug)
            fprintf_unfiltered (gdb_stdlog,
                                "target_follow_fork (%d, %d) = %d\n",
                                follow_child, detach_fork, retval);
          return retval;
        }
    }

  /* Some target returned a fork event, but did not know how to follow it.  */
  internal_error (__FILE__, __LINE__,
                  _("could not find a target to follow fork"));
}
2783
/* The inferior is gone; let the first target that implements
   to_mourn_inferior clean up after it, then drop cached BFD file
   handles.  It is an internal error if no target implements it.  */

void
target_mourn_inferior (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_mourn_inferior != NULL)
        {
          t->to_mourn_inferior (t);
          if (targetdebug)
            fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");

          /* We no longer need to keep handles on any of the object files.
             Make sure to release them to avoid unnecessarily locking any
             of them while we're not actually debugging.  */
          bfd_cache_close_all ();

          return;
        }
    }

  internal_error (__FILE__, __LINE__,
                  _("could not find a target to follow mourn inferior"));
}
2809
2810 /* Look for a target which can describe architectural features, starting
2811 from TARGET. If we find one, return its description. */
2812
2813 const struct target_desc *
2814 target_read_description (struct target_ops *target)
2815 {
2816 struct target_ops *t;
2817
2818 for (t = target; t != NULL; t = t->beneath)
2819 if (t->to_read_description != NULL)
2820 {
2821 const struct target_desc *tdesc;
2822
2823 tdesc = t->to_read_description (t);
2824 if (tdesc)
2825 return tdesc;
2826 }
2827
2828 return NULL;
2829 }
2830
2831 /* The default implementation of to_search_memory.
2832 This implements a basic search of memory, reading target memory and
2833 performing the search here (as opposed to performing the search in on the
2834 target side with, for example, gdbserver). */
2835
int
simple_search_memory (struct target_ops *ops,
                      CORE_ADDR start_addr, ULONGEST search_space_len,
                      const gdb_byte *pattern, ULONGEST pattern_len,
                      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* Over-allocate by PATTERN_LEN - 1 so a match straddling two chunks
     is still found within a single memmem call.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
                 "memory at %s, halting search."),
               pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      /* NOTE(review): memmem is a GNU extension; assumed provided via
	 gnulib on non-GNU hosts -- confirm.  */
      found_ptr = memmem (search_buf, nr_search_bytes,
                          pattern, pattern_len);

      if (found_ptr != NULL)
        {
          CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

          *found_addrp = found_addr;
          do_cleanups (old_cleanups);
          return 1;
        }

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
        search_space_len -= chunk_size;
      else
        search_space_len = 0;

      if (search_space_len >= pattern_len)
        {
          unsigned keep_len = search_buf_size - chunk_size;
          CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
          int nr_to_read;

          /* Copy the trailing part of the previous iteration to the front
             of the buffer for the next iteration.  */
          gdb_assert (keep_len == pattern_len - 1);
          memcpy (search_buf, search_buf + chunk_size, keep_len);

          nr_to_read = min (search_space_len - keep_len, chunk_size);

          if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                           search_buf + keep_len, read_addr,
                           nr_to_read) != nr_to_read)
            {
              warning (_("Unable to access %s bytes of target "
                         "memory at %s, halting search."),
                       plongest (nr_to_read),
                       hex_string (read_addr));
              do_cleanups (old_cleanups);
              return -1;
            }

          start_addr += chunk_size;
        }
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2938
2939 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2940 sequence of bytes in PATTERN with length PATTERN_LEN.
2941
2942 The result is 1 if found, 0 if not found, and -1 if there was an error
2943 requiring halting of the search (e.g. memory read error).
2944 If the pattern is found the address is recorded in FOUND_ADDRP. */
2945
int
target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
                      const gdb_byte *pattern, ULONGEST pattern_len,
                      CORE_ADDR *found_addrp)
{
  struct target_ops *t;
  int found;

  /* We don't use INHERIT to set current_target.to_search_memory,
     so we have to scan the target stack and handle targetdebug
     ourselves.  */

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
                        hex_string (start_addr));

  /* Find the first target on the stack that provides its own search.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_search_memory != NULL)
      break;

  if (t != NULL)
    {
      found = t->to_search_memory (t, start_addr, search_space_len,
                                   pattern, pattern_len, found_addrp);
    }
  else
    {
      /* If a special version of to_search_memory isn't available, use the
         simple version.  */
      found = simple_search_memory (current_target.beneath,
                                    start_addr, search_space_len,
                                    pattern, pattern_len, found_addrp);
    }

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "  = %d\n", found);

  return found;
}
2985
2986 /* Look through the currently pushed targets. If none of them will
2987 be able to restart the currently running process, issue an error
2988 message. */
2989
void
target_require_runnable (void)
{
  struct target_ops *t;

  for (t = target_stack; t != NULL; t = t->beneath)
    {
      /* If this target knows how to create a new program, then
         assume we will still be able to after killing the current
         one.  Either killing and mourning will not pop T, or else
         find_default_run_target will find it again.  */
      if (t->to_create_inferior != NULL)
        return;

      /* Do not worry about thread_stratum targets that can not
         create inferiors.  Assume they will be pushed again if
         necessary, and continue to the process_stratum.  */
      if (t->to_stratum == thread_stratum
          || t->to_stratum == arch_stratum)
        continue;

      /* Any other target that cannot run stops the search: report it.  */
      error (_("The \"%s\" target does not support \"run\".  "
               "Try \"help target\" or \"continue\"."),
             t->to_shortname);
    }

  /* This function is only called if the target is running.  In that
     case there should have been a process_stratum target and it
     should either know how to create inferiors, or not...  */
  internal_error (__FILE__, __LINE__, _("No targets found"));
}
3021
3022 /* Look through the list of possible targets for a target that can
3023 execute a run or attach command without any other data. This is
3024 used to locate the default process stratum.
3025
3026 If DO_MESG is not NULL, the result is always valid (error() is
3027 called for errors); else, return NULL on error. */
3028
3029 static struct target_ops *
3030 find_default_run_target (char *do_mesg)
3031 {
3032 struct target_ops **t;
3033 struct target_ops *runable = NULL;
3034 int count;
3035
3036 count = 0;
3037
3038 for (t = target_structs; t < target_structs + target_struct_size;
3039 ++t)
3040 {
3041 if ((*t)->to_can_run && target_can_run (*t))
3042 {
3043 runable = *t;
3044 ++count;
3045 }
3046 }
3047
3048 if (count != 1)
3049 {
3050 if (do_mesg)
3051 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3052 else
3053 return NULL;
3054 }
3055
3056 return runable;
3057 }
3058
3059 void
3060 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3061 {
3062 struct target_ops *t;
3063
3064 t = find_default_run_target ("attach");
3065 (t->to_attach) (t, args, from_tty);
3066 return;
3067 }
3068
3069 void
3070 find_default_create_inferior (struct target_ops *ops,
3071 char *exec_file, char *allargs, char **env,
3072 int from_tty)
3073 {
3074 struct target_ops *t;
3075
3076 t = find_default_run_target ("run");
3077 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3078 return;
3079 }
3080
static int
find_default_can_async_p (struct target_ops *ignore)
{
  struct target_ops *t;

  /* This may be called before the target is pushed on the stack;
     look for the default process stratum.  If there's none, gdb isn't
     configured with a native debugger, and target remote isn't
     connected yet.  */
  t = find_default_run_target (NULL);
  /* Only consult T if it overrides the auto-generated delegator --
     otherwise we would recurse into the default implementation.  */
  if (t && t->to_can_async_p != delegate_can_async_p)
    return (t->to_can_async_p) (t);
  return 0;
}
3095
static int
find_default_is_async_p (struct target_ops *ignore)
{
  struct target_ops *t;

  /* This may be called before the target is pushed on the stack;
     look for the default process stratum.  If there's none, gdb isn't
     configured with a native debugger, and target remote isn't
     connected yet.  */
  t = find_default_run_target (NULL);
  /* Only consult T if it overrides the auto-generated delegator --
     otherwise we would recurse into the default implementation.  */
  if (t && t->to_is_async_p != delegate_is_async_p)
    return (t->to_is_async_p) (t);
  return 0;
}
3110
3111 static int
3112 find_default_supports_non_stop (struct target_ops *self)
3113 {
3114 struct target_ops *t;
3115
3116 t = find_default_run_target (NULL);
3117 if (t && t->to_supports_non_stop)
3118 return (t->to_supports_non_stop) (t);
3119 return 0;
3120 }
3121
3122 int
3123 target_supports_non_stop (void)
3124 {
3125 struct target_ops *t;
3126
3127 for (t = &current_target; t != NULL; t = t->beneath)
3128 if (t->to_supports_non_stop)
3129 return t->to_supports_non_stop (t);
3130
3131 return 0;
3132 }
3133
3134 /* Implement the "info proc" command. */
3135
int
target_info_proc (char *args, enum info_proc_what what)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  if (current_target.to_stratum >= process_stratum)
    t = current_target.beneath;
  else
    t = find_default_run_target (NULL);

  /* Delegate to the first target implementing to_info_proc; return 0
     if none does so the caller can report the lack of support.  */
  for (; t != NULL; t = t->beneath)
    {
      if (t->to_info_proc != NULL)
        {
          t->to_info_proc (t, args, what);

          /* NOTE(review): ARGS is printed with %s here; confirm
	     callers never pass NULL.  */
          if (targetdebug)
            fprintf_unfiltered (gdb_stdlog,
                                "target_info_proc (\"%s\", %d)\n", args, what);

          return 1;
        }
    }

  return 0;
}
3165
3166 static int
3167 find_default_supports_disable_randomization (struct target_ops *self)
3168 {
3169 struct target_ops *t;
3170
3171 t = find_default_run_target (NULL);
3172 if (t && t->to_supports_disable_randomization)
3173 return (t->to_supports_disable_randomization) (t);
3174 return 0;
3175 }
3176
3177 int
3178 target_supports_disable_randomization (void)
3179 {
3180 struct target_ops *t;
3181
3182 for (t = &current_target; t != NULL; t = t->beneath)
3183 if (t->to_supports_disable_randomization)
3184 return t->to_supports_disable_randomization (t);
3185
3186 return 0;
3187 }
3188
/* Fetch OS data of kind TYPE (e.g. "processes") as an XML string
   allocated via xmalloc, or NULL if no suitable target is available.
   Caller owns and must free the result.  */

char *
target_get_osdata (const char *type)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  if (current_target.to_stratum >= process_stratum)
    t = current_target.beneath;
  else
    t = find_default_run_target ("get OS data");

  if (!t)
    return NULL;

  /* Delegate to the generic stralloc reader over the OSDATA object.  */
  return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
}
3207
/* Determine the current address space of thread PTID.  Never returns
   NULL: if no target implements to_thread_address_space, fall back to
   the inferior's main address space, and raise an internal error if
   even that is unavailable.  */

struct address_space *
target_thread_address_space (ptid_t ptid)
{
  struct address_space *aspace;
  struct inferior *inf;
  struct target_ops *t;

  /* Ask the first target in the stack that knows per-thread address
     spaces.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_address_space != NULL)
        {
          aspace = t->to_thread_address_space (t, ptid);
          /* Targets implementing the method must not return NULL.  */
          gdb_assert (aspace);

          if (targetdebug)
            fprintf_unfiltered (gdb_stdlog,
                                "target_thread_address_space (%s) = %d\n",
                                target_pid_to_str (ptid),
                                address_space_num (aspace));
          return aspace;
        }
    }

  /* Fall-back to the "main" address space of the inferior.  */
  inf = find_inferior_pid (ptid_get_pid (ptid));

  if (inf == NULL || inf->aspace == NULL)
    internal_error (__FILE__, __LINE__,
                    _("Can't determine the current "
                      "address space of thread %s\n"),
                    target_pid_to_str (ptid));

  return inf->aspace;
}
3244
3245
3246 /* Target file operations. */
3247
/* Return the target to use for file I/O requests: the first target
   beneath a process target if one is pushed, otherwise the default
   run (native) target.  May return NULL.  */

static struct target_ops *
default_fileio_target (void)
{
  /* If we're already connected to something that can perform
     file I/O, use it.  Otherwise, try using the native target.  */
  if (current_target.to_stratum >= process_stratum)
    return current_target.beneath;
  else
    return find_default_run_target ("file I/O");
}
3258
/* Open FILENAME on the target, using FLAGS and MODE.  Return a
   target file descriptor, or -1 if an error occurs (and set
   *TARGET_ERRNO).  FLAGS/MODE use the FILEIO_* encoding from
   gdb/fileio.h, not host open(2) values.  */
int
target_fileio_open (const char *filename, int flags, int mode,
                    int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target in the stack that implements
     to_fileio_open.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_open != NULL)
        {
          int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);

          if (targetdebug)
            fprintf_unfiltered (gdb_stdlog,
                                "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
                                filename, flags, mode,
                                fd, fd != -1 ? 0 : *target_errno);
          return fd;
        }
    }

  /* No target supports file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3286
/* Write up to LEN bytes from WRITE_BUF to FD on the target at file
   offset OFFSET.  Return the number of bytes written, or -1 if an
   error occurs (and set *TARGET_ERRNO).  */
int
target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
                      ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target that implements to_fileio_pwrite.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pwrite != NULL)
        {
          int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
                                         target_errno);

          if (targetdebug)
            fprintf_unfiltered (gdb_stdlog,
                                "target_fileio_pwrite (%d,...,%d,%s) "
                                "= %d (%d)\n",
                                fd, len, pulongest (offset),
                                ret, ret != -1 ? 0 : *target_errno);
          return ret;
        }
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3316
/* Read up to LEN bytes from FD on the target into READ_BUF, starting
   at file offset OFFSET.  Return the number of bytes read, or -1 if
   an error occurs (and set *TARGET_ERRNO).  */
int
target_fileio_pread (int fd, gdb_byte *read_buf, int len,
                     ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target that implements to_fileio_pread.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pread != NULL)
        {
          int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
                                        target_errno);

          if (targetdebug)
            fprintf_unfiltered (gdb_stdlog,
                                "target_fileio_pread (%d,...,%d,%s) "
                                "= %d (%d)\n",
                                fd, len, pulongest (offset),
                                ret, ret != -1 ? 0 : *target_errno);
          return ret;
        }
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3346
/* Close FD on the target.  Return 0, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
int
target_fileio_close (int fd, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target that implements to_fileio_close.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_close != NULL)
        {
          int ret = t->to_fileio_close (t, fd, target_errno);

          if (targetdebug)
            fprintf_unfiltered (gdb_stdlog,
                                "target_fileio_close (%d) = %d (%d)\n",
                                fd, ret, ret != -1 ? 0 : *target_errno);
          return ret;
        }
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3371
/* Unlink FILENAME on the target.  Return 0, or -1 if an error
   occurs (and set *TARGET_ERRNO).  */
int
target_fileio_unlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target that implements to_fileio_unlink.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_unlink != NULL)
        {
          int ret = t->to_fileio_unlink (t, filename, target_errno);

          if (targetdebug)
            fprintf_unfiltered (gdb_stdlog,
                                "target_fileio_unlink (%s) = %d (%d)\n",
                                filename, ret, ret != -1 ? 0 : *target_errno);
          return ret;
        }
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3396
/* Read value of symbolic link FILENAME on the target.  Return a
   null-terminated string allocated via xmalloc, or NULL if an error
   occurs (and set *TARGET_ERRNO).  Caller owns and must free the
   result.  */
char *
target_fileio_readlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target that implements to_fileio_readlink.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_readlink != NULL)
        {
          char *ret = t->to_fileio_readlink (t, filename, target_errno);

          if (targetdebug)
            fprintf_unfiltered (gdb_stdlog,
                                "target_fileio_readlink (%s) = %s (%d)\n",
                                filename, ret? ret : "(nil)",
                                ret? 0 : *target_errno);
          return ret;
        }
    }

  *target_errno = FILEIO_ENOSYS;
  return NULL;
}
3423
/* Cleanup callback: close the target file descriptor pointed to by
   OPAQUE, discarding any error indication.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int target_errno;

  target_fileio_close (*(int *) opaque, &target_errno);
}
3432
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.

   Returns -1 on error.  On success *BUF_P is set only when the
   returned size is nonzero; a zero-length read frees the buffer and
   leaves *BUF_P untouched, so callers must not use it in that
   case.  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
                            gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Ensure FD is closed on every exit path, including QUIT.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Reserve PADDING bytes at the end of the buffer so the caller
         can append (e.g. a NUL terminator) without reallocating.  */
      n = target_fileio_pread (fd, &buf[buf_pos],
                               buf_alloc - buf_pos - padding, buf_pos,
                               &target_errno);
      if (n < 0)
        {
          /* An error occurred.  */
          do_cleanups (close_cleanup);
          xfree (buf);
          return -1;
        }
      else if (n == 0)
        {
          /* Read all there was.  */
          do_cleanups (close_cleanup);
          if (buf_pos == 0)
            xfree (buf);
          else
            *buf_p = buf;
          return buf_pos;
        }

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
        {
          buf_alloc *= 2;
          buf = xrealloc (buf, buf_alloc);
        }

      /* Allow the user to interrupt a long read.  */
      QUIT;
    }
}
3496
/* Read target file FILENAME.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.  */

LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  /* No padding needed for a raw binary read.  */
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}
3506
/* Read target file FILENAME.  The result is NUL-terminated and
   returned as a string, allocated using xmalloc.  If an error occurs
   or the transfer is unsupported, NULL is returned.  Empty objects
   are returned as allocated but empty strings.  A warning is issued
   if the result contains any embedded NUL bytes.  Caller owns and
   must free the result.  */

char *
target_fileio_read_stralloc (const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Padding of 1 reserves room for the trailing NUL added below.  */
  transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  /* BUFFER is not set for a zero-length transfer; hand back a fresh
     empty string instead.  */
  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
        warning (_("target file %s "
                   "contained unexpected null characters"),
                 filename);
        break;
      }

  return bufstr;
}
3543
3544
3545 static int
3546 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3547 CORE_ADDR addr, int len)
3548 {
3549 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3550 }
3551
3552 static int
3553 default_watchpoint_addr_within_range (struct target_ops *target,
3554 CORE_ADDR addr,
3555 CORE_ADDR start, int length)
3556 {
3557 return addr >= start && addr < start + length;
3558 }
3559
/* Default to_thread_architecture: every thread shares the main
   target architecture.  */

static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch ();
}

/* Stub returning 0; cast into various target-vector slots as a
   "not supported" default.  */

static int
return_zero (void)
{
  return 0;
}

/* Stub returning -1; used as a "not supported" default.  */

static int
return_minus_one (void)
{
  return -1;
}

/* Stub returning NULL; used as a "not supported" default.  */

static void *
return_null (void)
{
  return 0;
}
3583
/*
 * Find the next target down the stack from the specified target.
 */

struct target_ops *
find_target_beneath (struct target_ops *t)
{
  return t->beneath;
}

/* See target.h.  Return the first target in the stack at exactly
   STRATUM, or NULL if none is pushed there.  */

struct target_ops *
find_target_at (enum strata stratum)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_stratum == stratum)
      return t;

  return NULL;
}
3607
3608 \f
/* The inferior process has died.  Long live the inferior!  Shared
   teardown used by most targets' to_mourn_inferior: clear
   inferior_ptid, tear down per-inferior state, and reset caches.
   The statement order below is significant.  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Clear inferior_ptid first so nothing below operates on the dead
     inferior by accident.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  /* Let user-supplied hooks observe the detach, if installed.  */
  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3643 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer, overwritten on each call — do not free, and copy if
   the value must outlive the next call.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}

/* Dummy-target to_pid_to_str: just use the normal formatting.  */

static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3661
/* Error-catcher for target_find_memory_regions.  Raises an error;
   the return statement exists only to satisfy the signature.  */
static int
dummy_find_memory_regions (struct target_ops *self,
                           find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  return 0;
}

/* Error-catcher for target_make_corefile_notes.  Raises an error;
   never actually returns.  */
static char *
dummy_make_corefile_notes (struct target_ops *self,
                           bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  return NULL;
}

/* Error-catcher for target_get_bookmark.  tcomplain raises the
   standard "not supported" error.  */
static gdb_byte *
dummy_get_bookmark (struct target_ops *self, char *ignore1, int ignore2)
{
  tcomplain ();
  return NULL;
}

/* Error-catcher for target_goto_bookmark.  */
static void
dummy_goto_bookmark (struct target_ops *self, gdb_byte *ignore, int from_tty)
{
  tcomplain ();
}
3694
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  The dummy target sits at the bottom of every target stack
   and supplies "nothing to debug" defaults.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
  dummy_target.to_get_bookmark = dummy_get_bookmark;
  dummy_target.to_goto_bookmark = dummy_goto_bookmark;
  /* NOTE(review): calling return_zero through these cast function
     pointer types relies on the caller ignoring the extra arguments;
     long-standing GDB practice, but technically not portable C.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  /* Fill every remaining slot with a default method.  */
  install_dummy_methods (&dummy_target);
}
3723 \f
3724 static void
3725 debug_to_open (char *args, int from_tty)
3726 {
3727 debug_target.to_open (args, from_tty);
3728
3729 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3730 }
3731
/* Close TARG, preferring the extended to_xclose (which may free TARG
   itself) over plain to_close.  TARG must already be unpushed.  */

void
target_close (struct target_ops *targ)
{
  gdb_assert (!target_is_pushed (targ));

  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3745
/* Attach to the process specified by ARGS, delegating to the current
   target's to_attach method.  */

void
target_attach (char *args, int from_tty)
{
  current_target.to_attach (&current_target, args, from_tty);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
                        args, from_tty);
}
3754
/* Return nonzero if thread PTID is still alive, asking the first
   target in the stack that implements to_thread_alive.  Returns 0 if
   no target implements the method.  */

int
target_thread_alive (ptid_t ptid)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_alive != NULL)
        {
          int retval;

          retval = t->to_thread_alive (t, ptid);
          if (targetdebug)
            /* Only the pid component is logged here.  */
            fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
                                ptid_get_pid (ptid), retval);

          return retval;
        }
    }

  return 0;
}
3777
/* Ask the first capable target in the stack to discover any threads
   GDB does not yet know about.  A no-op if no target implements
   to_find_new_threads.  */

void
target_find_new_threads (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_find_new_threads != NULL)
        {
          t->to_find_new_threads (t);
          if (targetdebug)
            fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");

          return;
        }
    }
}
3795
/* Stop thread PTID via the current target's to_stop method, unless
   stopping has been administratively disabled (may_stop).  */

void
target_stop (ptid_t ptid)
{
  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  (*current_target.to_stop) (&current_target, ptid);
}
3807
/* Debug wrapper for to_post_attach: call the real method, then log
   the call.  */

static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3815
/* Concatenate ELEM to LIST, a comma separate list, and return the
   result.  The LIST incoming argument is released; the returned
   string is a fresh xmalloc'd allocation owned by the caller.  LIST
   may be NULL, meaning "empty list so far".  */

static char *
str_comma_list_concat_elem (char *list, const char *elem)
{
  if (list == NULL)
    return xstrdup (elem);
  else
    return reconcat (list, list, ", ", elem, (char *) NULL);
}

/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS so the caller can detect leftover unknown bits.  */

static char *
do_option (int *target_options, char *ret,
           int opt, char *opt_str)
{
  if ((*target_options & opt) != 0)
    {
      ret = str_comma_list_concat_elem (ret, opt_str);
      *target_options &= ~opt;
    }

  return ret;
}
3845
/* Render TARGET_OPTIONS (a TARGET_* flags mask) as a human-readable,
   comma-separated xmalloc'd string; unknown bits render as
   "unknown???".  Caller must free the result.  */

char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

  /* Append #OPT to RET when OPT is set, clearing it from the mask.  */
#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  /* Any bits still set were not recognized above.  */
  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3863
/* Log a one-line description of register REGNO of REGCACHE to
   gdb_stdlog, prefixed with FUNC: the register's name (or number),
   its raw bytes, and — when it fits in a LONGEST — its value as both
   address and decimal.  */

static void
debug_print_register (const char * func,
                      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the symbolic register name when one exists.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
                        gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      /* Raw bytes, in target memory order.  */
      for (i = 0; i < size; i++)
        {
          fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
        }
      if (size <= sizeof (LONGEST))
        {
          ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

          fprintf_unfiltered (gdb_stdlog, " %s %s",
                              core_addr_to_string_nz (val), plongest (val));
        }
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3900
/* Fetch register REGNO into REGCACHE via the first target in the
   stack that implements to_fetch_registers.  Silently does nothing
   if no target implements it.  */

void
target_fetch_registers (struct regcache *regcache, int regno)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_fetch_registers != NULL)
        {
          t->to_fetch_registers (t, regcache, regno);
          if (targetdebug)
            debug_print_register ("target_fetch_registers", regcache, regno);
          return;
        }
    }
}
3917
3918 void
3919 target_store_registers (struct regcache *regcache, int regno)
3920 {
3921 struct target_ops *t;
3922
3923 if (!may_write_registers)
3924 error (_("Writing to registers is not allowed (regno %d)"), regno);
3925
3926 current_target.to_store_registers (&current_target, regcache, regno);
3927 if (targetdebug)
3928 {
3929 debug_print_register ("target_store_registers", regcache, regno);
3930 }
3931 }
3932
/* Return the CPU core thread PTID was last seen on, or -1 if no
   target in the stack implements to_core_of_thread.  */

int
target_core_of_thread (ptid_t ptid)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_core_of_thread != NULL)
        {
          int retval = t->to_core_of_thread (t, ptid);

          if (targetdebug)
            fprintf_unfiltered (gdb_stdlog,
                                "target_core_of_thread (%d) = %d\n",
                                ptid_get_pid (ptid), retval);
          return retval;
        }
    }

  return -1;
}
3954
3955 int
3956 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3957 {
3958 struct target_ops *t;
3959
3960 for (t = current_target.beneath; t != NULL; t = t->beneath)
3961 {
3962 if (t->to_verify_memory != NULL)
3963 {
3964 int retval = t->to_verify_memory (t, data, memaddr, size);
3965
3966 if (targetdebug)
3967 fprintf_unfiltered (gdb_stdlog,
3968 "target_verify_memory (%s, %s) = %d\n",
3969 paddress (target_gdbarch (), memaddr),
3970 pulongest (size),
3971 retval);
3972 return retval;
3973 }
3974 }
3975
3976 tcomplain ();
3977 }
3978
/* The documentation for this function is in its prototype declaration in
   target.h.  Insert a masked watchpoint; returns the delegate's
   result, or 1 (failure) if no target supports masked watchpoints.  */

int
target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insert_mask_watchpoint != NULL)
      {
        int ret;

        ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);

        if (targetdebug)
          fprintf_unfiltered (gdb_stdlog, "\
target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
                              core_addr_to_string (addr),
                              core_addr_to_string (mask), rw, ret);

        return ret;
      }

  return 1;
}
4005
/* The documentation for this function is in its prototype declaration in
   target.h.  Remove a masked watchpoint; returns the delegate's
   result, or 1 (failure) if no target supports masked watchpoints.  */

int
target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_remove_mask_watchpoint != NULL)
      {
        int ret;

        ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);

        if (targetdebug)
          fprintf_unfiltered (gdb_stdlog, "\
target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
                              core_addr_to_string (addr),
                              core_addr_to_string (mask), rw, ret);

        return ret;
      }

  return 1;
}
4032
/* The documentation for this function is in its prototype declaration
   in target.h.  Returns the number of debug registers a masked
   watchpoint at ADDR/MASK would use, or -1 if unsupported.  */

int
target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_masked_watch_num_registers != NULL)
      return t->to_masked_watch_num_registers (t, addr, mask);

  return -1;
}

/* The documentation for this function is in its prototype declaration
   in target.h.  Returns the number of debug registers a ranged
   breakpoint would use, or -1 if unsupported.  */

int
target_ranged_break_num_registers (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_ranged_break_num_registers != NULL)
      return t->to_ranged_break_num_registers (t);

  return -1;
}
4062
/* See target.h.  Enable branch tracing for PTID via the first target
   implementing to_enable_btrace; throws if unsupported.  */

struct btrace_target_info *
target_enable_btrace (ptid_t ptid)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_enable_btrace != NULL)
      return t->to_enable_btrace (t, ptid);

  tcomplain ();
  return NULL;
}

/* See target.h.  Disable branch tracing for BTINFO; throws if no
   target supports it.  */

void
target_disable_btrace (struct btrace_target_info *btinfo)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_disable_btrace != NULL)
      {
        t->to_disable_btrace (t, btinfo);
        return;
      }

  tcomplain ();
}

/* See target.h.  Tear down branch tracing state for BTINFO; throws
   if no target supports it.  */

void
target_teardown_btrace (struct btrace_target_info *btinfo)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_teardown_btrace != NULL)
      {
        t->to_teardown_btrace (t, btinfo);
        return;
      }

  tcomplain ();
}

/* See target.h.  Read branch trace data of kind TYPE for BTINFO into
   BTRACE; throws if no target supports it.  */

enum btrace_error
target_read_btrace (VEC (btrace_block_s) **btrace,
                    struct btrace_target_info *btinfo,
                    enum btrace_read_type type)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_read_btrace != NULL)
      return t->to_read_btrace (t, btrace, btinfo, type);

  tcomplain ();
  return BTRACE_ERR_NOT_SUPPORTED;
}
4128
/* See target.h.  Stop recording; silently does nothing if no target
   supports it (stopping recording is optional).  */

void
target_stop_recording (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_stop_recording != NULL)
      {
        t->to_stop_recording (t);
        return;
      }

  /* This is optional.  */
}

/* See target.h.  Print information about the recording; throws if no
   target supports it.  */

void
target_info_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_info_record != NULL)
      {
        t->to_info_record (t);
        return;
      }

  tcomplain ();
}

/* See target.h.  Save the recorded execution trace to FILENAME;
   throws if no target supports it.  */

void
target_save_record (const char *filename)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_save_record != NULL)
      {
        t->to_save_record (t, filename);
        return;
      }

  tcomplain ();
}

/* See target.h.  Return nonzero if some target in the stack provides
   to_delete_record (capability query only; nothing is deleted).  */

int
target_supports_delete_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_delete_record != NULL)
      return 1;

  return 0;
}

/* See target.h.  Delete the recorded execution trace; throws if no
   target supports it.  */

void
target_delete_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_delete_record != NULL)
      {
        t->to_delete_record (t);
        return;
      }

  tcomplain ();
}

/* See target.h.  Return nonzero if the target is currently replaying
   a recorded execution; zero if not (or if unsupported).  */

int
target_record_is_replaying (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_record_is_replaying != NULL)
      return t->to_record_is_replaying (t);

  return 0;
}

/* See target.h.  Go to the beginning of the recorded execution;
   throws if no target supports it.  */

void
target_goto_record_begin (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record_begin != NULL)
      {
        t->to_goto_record_begin (t);
        return;
      }

  tcomplain ();
}

/* See target.h.  Go to the end of the recorded execution; throws if
   no target supports it.  */

void
target_goto_record_end (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record_end != NULL)
      {
        t->to_goto_record_end (t);
        return;
      }

  tcomplain ();
}

/* See target.h.  Go to instruction number INSN in the recorded
   execution; throws if no target supports it.  */

void
target_goto_record (ULONGEST insn)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record != NULL)
      {
        t->to_goto_record (t, insn);
        return;
      }

  tcomplain ();
}
4275
/* See target.h.  Display SIZE instructions of the execution history
   per FLAGS; throws if no target supports it.  */

void
target_insn_history (int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history != NULL)
      {
        t->to_insn_history (t, size, flags);
        return;
      }

  tcomplain ();
}

/* See target.h.  Display SIZE instructions starting at FROM; throws
   if no target supports it.  */

void
target_insn_history_from (ULONGEST from, int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history_from != NULL)
      {
        t->to_insn_history_from (t, from, size, flags);
        return;
      }

  tcomplain ();
}

/* See target.h.  Display the instructions in [BEGIN, END); throws if
   no target supports it.  */

void
target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history_range != NULL)
      {
        t->to_insn_history_range (t, begin, end, flags);
        return;
      }

  tcomplain ();
}

/* See target.h.  Display SIZE functions of the call history per
   FLAGS; throws if no target supports it.  */

void
target_call_history (int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history != NULL)
      {
        t->to_call_history (t, size, flags);
        return;
      }

  tcomplain ();
}

/* See target.h.  Display SIZE functions of the call history starting
   at BEGIN; throws if no target supports it.  */

void
target_call_history_from (ULONGEST begin, int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history_from != NULL)
      {
        t->to_call_history_from (t, begin, size, flags);
        return;
      }

  tcomplain ();
}

/* See target.h.  Display the call history in [BEGIN, END); throws if
   no target supports it.  */

void
target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history_range != NULL)
      {
        t->to_call_history_range (t, begin, end, flags);
        return;
      }

  tcomplain ();
}
4377
/* Debug wrapper for to_prepare_to_store: call the real method, then
   log the call.  */

static void
debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
{
  debug_target.to_prepare_to_store (&debug_target, regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
4385
/* See target.h.  Return the first target-provided frame unwinder, or
   NULL.  Note: to_get_unwinder here holds the unwinder pointer
   directly; it is read, not called.  */

const struct frame_unwind *
target_get_unwinder (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_get_unwinder != NULL)
      return t->to_get_unwinder;

  return NULL;
}

/* See target.h.  Return the first target-provided tail-call frame
   unwinder, or NULL.  As above, the slot is a data pointer.  */

const struct frame_unwind *
target_get_tailcall_unwinder (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_get_tailcall_unwinder != NULL)
      return t->to_get_tailcall_unwinder;

  return NULL;
}
4413
/* See target.h.  Starting at OPS, find the first target providing
   to_decr_pc_after_break and return its answer; otherwise fall back
   to the architecture's default.  */

CORE_ADDR
forward_target_decr_pc_after_break (struct target_ops *ops,
                                    struct gdbarch *gdbarch)
{
  for (; ops != NULL; ops = ops->beneath)
    if (ops->to_decr_pc_after_break != NULL)
      return ops->to_decr_pc_after_break (ops, gdbarch);

  return gdbarch_decr_pc_after_break (gdbarch);
}

/* See target.h.  Convenience wrapper starting the search at the top
   of the current target stack.  */

CORE_ADDR
target_decr_pc_after_break (struct gdbarch *gdbarch)
{
  return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
}
4434
/* Debug wrapper for the deprecated_xfer_memory method: perform the
   transfer, then log the call and (on success) a hex dump of the
   bytes transferred.  At targetdebug < 2 the dump is truncated after
   the first 16-byte row.  */

static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
                              int write, struct mem_attrib *attrib,
                              struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
                                                attrib, target);

  fprintf_unfiltered (gdb_stdlog,
                      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
                      paddress (target_gdbarch (), memaddr), len,
                      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
        {
          /* Break the dump into 16-byte rows, keyed off the buffer
             address alignment.  */
          if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
            {
              if (targetdebug < 2 && i > 0)
                {
                  fprintf_unfiltered (gdb_stdlog, " ...");
                  break;
                }
              fprintf_unfiltered (gdb_stdlog, "\n");
            }

          fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
        }
    }

  /* Terminate the log line started above.  */
  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4475
/* Logging wrapper for to_files_info: delegate to the real target,
   then trace the call in the debug log.  */

static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
4483
4484 static int
4485 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4486 struct bp_target_info *bp_tgt)
4487 {
4488 int retval;
4489
4490 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
4491
4492 fprintf_unfiltered (gdb_stdlog,
4493 "target_insert_breakpoint (%s, xxx) = %ld\n",
4494 core_addr_to_string (bp_tgt->placed_address),
4495 (unsigned long) retval);
4496 return retval;
4497 }
4498
4499 static int
4500 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4501 struct bp_target_info *bp_tgt)
4502 {
4503 int retval;
4504
4505 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
4506
4507 fprintf_unfiltered (gdb_stdlog,
4508 "target_remove_breakpoint (%s, xxx) = %ld\n",
4509 core_addr_to_string (bp_tgt->placed_address),
4510 (unsigned long) retval);
4511 return retval;
4512 }
4513
4514 static int
4515 debug_to_can_use_hw_breakpoint (struct target_ops *self,
4516 int type, int cnt, int from_tty)
4517 {
4518 int retval;
4519
4520 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
4521 type, cnt, from_tty);
4522
4523 fprintf_unfiltered (gdb_stdlog,
4524 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4525 (unsigned long) type,
4526 (unsigned long) cnt,
4527 (unsigned long) from_tty,
4528 (unsigned long) retval);
4529 return retval;
4530 }
4531
4532 static int
4533 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4534 CORE_ADDR addr, int len)
4535 {
4536 CORE_ADDR retval;
4537
4538 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4539 addr, len);
4540
4541 fprintf_unfiltered (gdb_stdlog,
4542 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4543 core_addr_to_string (addr), (unsigned long) len,
4544 core_addr_to_string (retval));
4545 return retval;
4546 }
4547
4548 static int
4549 debug_to_can_accel_watchpoint_condition (struct target_ops *self,
4550 CORE_ADDR addr, int len, int rw,
4551 struct expression *cond)
4552 {
4553 int retval;
4554
4555 retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
4556 addr, len,
4557 rw, cond);
4558
4559 fprintf_unfiltered (gdb_stdlog,
4560 "target_can_accel_watchpoint_condition "
4561 "(%s, %d, %d, %s) = %ld\n",
4562 core_addr_to_string (addr), len, rw,
4563 host_address_to_string (cond), (unsigned long) retval);
4564 return retval;
4565 }
4566
4567 static int
4568 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4569 {
4570 int retval;
4571
4572 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4573
4574 fprintf_unfiltered (gdb_stdlog,
4575 "target_stopped_by_watchpoint () = %ld\n",
4576 (unsigned long) retval);
4577 return retval;
4578 }
4579
4580 static int
4581 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4582 {
4583 int retval;
4584
4585 retval = debug_target.to_stopped_data_address (target, addr);
4586
4587 fprintf_unfiltered (gdb_stdlog,
4588 "target_stopped_data_address ([%s]) = %ld\n",
4589 core_addr_to_string (*addr),
4590 (unsigned long)retval);
4591 return retval;
4592 }
4593
4594 static int
4595 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4596 CORE_ADDR addr,
4597 CORE_ADDR start, int length)
4598 {
4599 int retval;
4600
4601 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4602 start, length);
4603
4604 fprintf_filtered (gdb_stdlog,
4605 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4606 core_addr_to_string (addr), core_addr_to_string (start),
4607 length, retval);
4608 return retval;
4609 }
4610
4611 static int
4612 debug_to_insert_hw_breakpoint (struct target_ops *self,
4613 struct gdbarch *gdbarch,
4614 struct bp_target_info *bp_tgt)
4615 {
4616 int retval;
4617
4618 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4619 gdbarch, bp_tgt);
4620
4621 fprintf_unfiltered (gdb_stdlog,
4622 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4623 core_addr_to_string (bp_tgt->placed_address),
4624 (unsigned long) retval);
4625 return retval;
4626 }
4627
4628 static int
4629 debug_to_remove_hw_breakpoint (struct target_ops *self,
4630 struct gdbarch *gdbarch,
4631 struct bp_target_info *bp_tgt)
4632 {
4633 int retval;
4634
4635 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4636 gdbarch, bp_tgt);
4637
4638 fprintf_unfiltered (gdb_stdlog,
4639 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4640 core_addr_to_string (bp_tgt->placed_address),
4641 (unsigned long) retval);
4642 return retval;
4643 }
4644
4645 static int
4646 debug_to_insert_watchpoint (struct target_ops *self,
4647 CORE_ADDR addr, int len, int type,
4648 struct expression *cond)
4649 {
4650 int retval;
4651
4652 retval = debug_target.to_insert_watchpoint (&debug_target,
4653 addr, len, type, cond);
4654
4655 fprintf_unfiltered (gdb_stdlog,
4656 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4657 core_addr_to_string (addr), len, type,
4658 host_address_to_string (cond), (unsigned long) retval);
4659 return retval;
4660 }
4661
4662 static int
4663 debug_to_remove_watchpoint (struct target_ops *self,
4664 CORE_ADDR addr, int len, int type,
4665 struct expression *cond)
4666 {
4667 int retval;
4668
4669 retval = debug_target.to_remove_watchpoint (&debug_target,
4670 addr, len, type, cond);
4671
4672 fprintf_unfiltered (gdb_stdlog,
4673 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4674 core_addr_to_string (addr), len, type,
4675 host_address_to_string (cond), (unsigned long) retval);
4676 return retval;
4677 }
4678
/* Logging wrapper for to_terminal_init: delegate to the saved real
   target, then trace the call.  */

static void
debug_to_terminal_init (struct target_ops *self)
{
  debug_target.to_terminal_init (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
4686
/* Logging wrapper for to_terminal_inferior: delegate to the saved real
   target, then trace the call.  */

static void
debug_to_terminal_inferior (struct target_ops *self)
{
  debug_target.to_terminal_inferior (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
4694
/* Logging wrapper for to_terminal_ours_for_output: delegate to the
   saved real target, then trace the call.  */

static void
debug_to_terminal_ours_for_output (struct target_ops *self)
{
  debug_target.to_terminal_ours_for_output (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}
4702
/* Logging wrapper for to_terminal_ours: delegate to the saved real
   target, then trace the call.  */

static void
debug_to_terminal_ours (struct target_ops *self)
{
  debug_target.to_terminal_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}
4710
/* Logging wrapper for to_terminal_save_ours: delegate to the saved
   real target, then trace the call.  */

static void
debug_to_terminal_save_ours (struct target_ops *self)
{
  debug_target.to_terminal_save_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}
4718
4719 static void
4720 debug_to_terminal_info (struct target_ops *self,
4721 const char *arg, int from_tty)
4722 {
4723 debug_target.to_terminal_info (&debug_target, arg, from_tty);
4724
4725 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4726 from_tty);
4727 }
4728
/* Logging wrapper for to_load: delegate to the saved real target,
   then trace the call.  */

static void
debug_to_load (struct target_ops *self, char *args, int from_tty)
{
  debug_target.to_load (&debug_target, args, from_tty);

  /* NOTE(review): ARGS looks like it could be NULL for a bare "load"
     command, which would make the %s below undefined -- confirm
     against callers.  */
  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
4736
/* Logging wrapper for to_post_startup_inferior: delegate to the saved
   real target, then trace the call with the new inferior's pid.  */

static void
debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_post_startup_inferior (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      ptid_get_pid (ptid));
}
4745
4746 static int
4747 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4748 {
4749 int retval;
4750
4751 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4752
4753 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4754 pid, retval);
4755
4756 return retval;
4757 }
4758
4759 static int
4760 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4761 {
4762 int retval;
4763
4764 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4765
4766 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4767 pid, retval);
4768
4769 return retval;
4770 }
4771
4772 static int
4773 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4774 {
4775 int retval;
4776
4777 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4778
4779 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4780 pid, retval);
4781
4782 return retval;
4783 }
4784
4785 static int
4786 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4787 {
4788 int retval;
4789
4790 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4791
4792 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4793 pid, retval);
4794
4795 return retval;
4796 }
4797
4798 static int
4799 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4800 {
4801 int retval;
4802
4803 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4804
4805 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4806 pid, retval);
4807
4808 return retval;
4809 }
4810
4811 static int
4812 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4813 {
4814 int retval;
4815
4816 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4817
4818 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4819 pid, retval);
4820
4821 return retval;
4822 }
4823
4824 static int
4825 debug_to_has_exited (struct target_ops *self,
4826 int pid, int wait_status, int *exit_status)
4827 {
4828 int has_exited;
4829
4830 has_exited = debug_target.to_has_exited (&debug_target,
4831 pid, wait_status, exit_status);
4832
4833 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4834 pid, wait_status, *exit_status, has_exited);
4835
4836 return has_exited;
4837 }
4838
4839 static int
4840 debug_to_can_run (struct target_ops *self)
4841 {
4842 int retval;
4843
4844 retval = debug_target.to_can_run (&debug_target);
4845
4846 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4847
4848 return retval;
4849 }
4850
4851 static struct gdbarch *
4852 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4853 {
4854 struct gdbarch *retval;
4855
4856 retval = debug_target.to_thread_architecture (ops, ptid);
4857
4858 fprintf_unfiltered (gdb_stdlog,
4859 "target_thread_architecture (%s) = %s [%s]\n",
4860 target_pid_to_str (ptid),
4861 host_address_to_string (retval),
4862 gdbarch_bfd_arch_info (retval)->printable_name);
4863 return retval;
4864 }
4865
/* Logging wrapper for to_stop: delegate to the saved real target, then
   trace the call.  */

static void
debug_to_stop (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_stop (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
		      target_pid_to_str (ptid));
}
4874
/* Logging wrapper for to_rcmd: delegate to the saved real target, then
   trace the monitor command that was sent.  */

static void
debug_to_rcmd (struct target_ops *self, char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (&debug_target, command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
4882
4883 static char *
4884 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4885 {
4886 char *exec_file;
4887
4888 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4889
4890 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4891 pid, exec_file);
4892
4893 return exec_file;
4894 }
4895
/* Install the debug_to_* logging wrappers into CURRENT_TARGET.

   First snapshot the fully merged target vector into DEBUG_TARGET --
   the wrappers forward their calls to that copy -- then overwrite each
   CURRENT_TARGET entry for which a logging wrapper exists.  Invoked
   when target debugging is enabled.  */

static void
setup_target_debug (void)
{
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4943 \f
4944
/* Help text shared by the "info target" and "info files" commands.  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4949
/* Default implementation of to_rcmd: this target has no remote
   monitor, so any "monitor" command is an error.  */

static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
4955
/* Implement the "monitor" command: pass CMD verbatim to the target's
   remote monitor, directing its output to gdb_stdtarg.  */

static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  target_rcmd (cmd, gdb_stdtarg);
}
4962
4963 /* Print the name of each layers of our target stack. */
4964
4965 static void
4966 maintenance_print_target_stack (char *cmd, int from_tty)
4967 {
4968 struct target_ops *t;
4969
4970 printf_filtered (_("The current target stack is:\n"));
4971
4972 for (t = target_stack; t != NULL; t = t->beneath)
4973 {
4974 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4975 }
4976 }
4977
/* Controls if async mode is permitted.  This is the effective value
   consulted by the rest of GDB.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated.  */
static int target_async_permitted_1 = 0;
4984
/* Callback for "set target-async": commit the staged value into
   target_async_permitted, unless an inferior is currently live, in
   which case revert the staged value and report an error.  */

static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Undo the user's edit so "show" still reflects reality.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
4997
/* Callback for "show target-async".  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
5007
/* Temporary copies of permission settings.  The "set may-*" commands
   write here first; the values are committed to the real may_* flags
   only when no inferior is executing.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
5016
5017 /* Make the user-set values match the real values again. */
5018
5019 void
5020 update_target_permissions (void)
5021 {
5022 may_write_registers_1 = may_write_registers;
5023 may_write_memory_1 = may_write_memory;
5024 may_insert_breakpoints_1 = may_insert_breakpoints;
5025 may_insert_tracepoints_1 = may_insert_tracepoints;
5026 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
5027 may_stop_1 = may_stop;
5028 }
5029
/* The one function handles (most of) the permission flags in the same
   way.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Revert the staged values and refuse: permissions cannot be
	 changed while the inferior is running.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  Note that
     may_write_memory is deliberately absent here: it is committed by
     set_write_memory_permission instead, independently of observer
     mode.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
5051
/* Set memory write permission independently of observer mode.  Unlike
   set_target_permissions, this does not refuse while the inferior is
   running.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
5062
5063
/* Module initialization: push the dummy target as the bottom of the
   target stack and register the "info target"/"info files" commands,
   the "monitor" command, the target-stack maintenance command, and the
   set/show commands for target debugging, readonly-section trust,
   async mode, and the may-* permission flags.  */

void
initialize_targets (void)
{
  /* The dummy target is the permanent bottom of the target stack.  */
  init_dummy_target ();
  push_target (&dummy_target);

  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* The may-* permission commands stage their values in the *_1
     variables; set_target_permissions / set_write_memory_permission
     commit them.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}