gdb/target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2012 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include "gdb_string.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "symtab.h"
28 #include "inferior.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "dcache.h"
33 #include <signal.h>
34 #include "regcache.h"
35 #include "gdb_assert.h"
36 #include "gdbcore.h"
37 #include "exceptions.h"
38 #include "target-descriptions.h"
39 #include "gdbthread.h"
40 #include "solib.h"
41 #include "exec.h"
42 #include "inline-frame.h"
43 #include "tracepoint.h"
44 #include "gdb/fileio.h"
45 #include "agent.h"
46
47 static void target_info (char *, int);
48
49 static void default_terminal_info (char *, int);
50
51 static int default_watchpoint_addr_within_range (struct target_ops *,
52 CORE_ADDR, CORE_ADDR, int);
53
54 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
55
56 static void tcomplain (void) ATTRIBUTE_NORETURN;
57
58 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
59
60 static int return_zero (void);
61
62 static int return_one (void);
63
64 static int return_minus_one (void);
65
66 void target_ignore (void);
67
68 static void target_command (char *, int);
69
70 static struct target_ops *find_default_run_target (char *);
71
72 static LONGEST default_xfer_partial (struct target_ops *ops,
73 enum target_object object,
74 const char *annex, gdb_byte *readbuf,
75 const gdb_byte *writebuf,
76 ULONGEST offset, LONGEST len);
77
78 static LONGEST current_xfer_partial (struct target_ops *ops,
79 enum target_object object,
80 const char *annex, gdb_byte *readbuf,
81 const gdb_byte *writebuf,
82 ULONGEST offset, LONGEST len);
83
84 static LONGEST target_xfer_partial (struct target_ops *ops,
85 enum target_object object,
86 const char *annex,
87 void *readbuf, const void *writebuf,
88 ULONGEST offset, LONGEST len);
89
90 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
91 ptid_t ptid);
92
93 static void init_dummy_target (void);
94
95 static struct target_ops debug_target;
96
97 static void debug_to_open (char *, int);
98
99 static void debug_to_prepare_to_store (struct regcache *);
100
101 static void debug_to_files_info (struct target_ops *);
102
103 static int debug_to_insert_breakpoint (struct gdbarch *,
104 struct bp_target_info *);
105
106 static int debug_to_remove_breakpoint (struct gdbarch *,
107 struct bp_target_info *);
108
109 static int debug_to_can_use_hw_breakpoint (int, int, int);
110
111 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
112 struct bp_target_info *);
113
114 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
115 struct bp_target_info *);
116
117 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
118 struct expression *);
119
120 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
121 struct expression *);
122
123 static int debug_to_stopped_by_watchpoint (void);
124
125 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
126
127 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
128 CORE_ADDR, CORE_ADDR, int);
129
130 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
131
132 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
133 struct expression *);
134
135 static void debug_to_terminal_init (void);
136
137 static void debug_to_terminal_inferior (void);
138
139 static void debug_to_terminal_ours_for_output (void);
140
141 static void debug_to_terminal_save_ours (void);
142
143 static void debug_to_terminal_ours (void);
144
145 static void debug_to_terminal_info (char *, int);
146
147 static void debug_to_load (char *, int);
148
149 static int debug_to_can_run (void);
150
151 static void debug_to_stop (ptid_t);
152
153 /* Pointer to array of target architecture structures; the size of the
154 array; the current index into the array; the allocated size of the
155 array. */
156 struct target_ops **target_structs;
157 unsigned target_struct_size;
158 unsigned target_struct_index;
159 unsigned target_struct_allocsize;
160 #define DEFAULT_ALLOCSIZE 10
161
162 /* The initial current target, so that there is always a semi-valid
163 current target. */
164
165 static struct target_ops dummy_target;
166
167 /* Top of target stack. */
168
169 static struct target_ops *target_stack;
170
171 /* The target structure we are currently using to talk to a process
172 or file or whatever "inferior" we have. */
173
174 struct target_ops current_target;
175
176 /* Command list for target. */
177
178 static struct cmd_list_element *targetlist = NULL;
179
180 /* Nonzero if we should trust readonly sections from the
181 executable when reading memory. */
182
183 static int trust_readonly = 0;
184
185 /* Nonzero if we should show true memory content including
186 memory breakpoints inserted by GDB. */
187
188 static int show_memory_breakpoints = 0;
189
190 /* These globals control whether GDB attempts to perform these
191 operations; they are useful for targets that need to prevent
192 inadvertent disruption, such as in non-stop mode. */
193
194 int may_write_registers = 1;
195
196 int may_write_memory = 1;
197
198 int may_insert_breakpoints = 1;
199
200 int may_insert_tracepoints = 1;
201
202 int may_insert_fast_tracepoints = 1;
203
204 int may_stop = 1;
205
206 /* Non-zero if we want to see trace of target level stuff. */
207
208 static unsigned int targetdebug = 0;
209 static void
210 show_targetdebug (struct ui_file *file, int from_tty,
211 struct cmd_list_element *c, const char *value)
212 {
213 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
214 }
215
216 static void setup_target_debug (void);
217
218 /* The option sets this. */
219 static int stack_cache_enabled_p_1 = 1;
220 /* And set_stack_cache_enabled_p updates this.
221 The reason for the separation is so that we don't flush the cache for
222 on->on transitions. */
223 static int stack_cache_enabled_p = 1;
224
225 /* This is called *after* the stack-cache setting has been changed.
226 Flush the cache for off->on and on->off transitions.
227 There's no real need to flush the cache for on->off transitions,
228 except cleanliness. */
229
230 static void
231 set_stack_cache_enabled_p (char *args, int from_tty,
232 struct cmd_list_element *c)
233 {
234 if (stack_cache_enabled_p != stack_cache_enabled_p_1)
235 target_dcache_invalidate ();
236
237 stack_cache_enabled_p = stack_cache_enabled_p_1;
238 }
239
240 static void
241 show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
242 struct cmd_list_element *c, const char *value)
243 {
244 fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
245 }
246
247 /* Cache of memory operations, to speed up remote access. */
248 static DCACHE *target_dcache;
249
250 /* Invalidate the target dcache. */
251
252 void
253 target_dcache_invalidate (void)
254 {
255 dcache_invalidate (target_dcache);
256 }
257
258 /* The user just typed 'target' without the name of a target. */
259
260 static void
261 target_command (char *arg, int from_tty)
262 {
263 fputs_filtered ("Argument required (target name). Try `help target'\n",
264 gdb_stdout);
265 }
266
267 /* Default target_has_* methods for process_stratum targets. */
268
269 int
270 default_child_has_all_memory (struct target_ops *ops)
271 {
272 /* If no inferior selected, then we can't read memory here. */
273 if (ptid_equal (inferior_ptid, null_ptid))
274 return 0;
275
276 return 1;
277 }
278
279 int
280 default_child_has_memory (struct target_ops *ops)
281 {
282 /* If no inferior selected, then we can't read memory here. */
283 if (ptid_equal (inferior_ptid, null_ptid))
284 return 0;
285
286 return 1;
287 }
288
289 int
290 default_child_has_stack (struct target_ops *ops)
291 {
292 /* If no inferior selected, there's no stack. */
293 if (ptid_equal (inferior_ptid, null_ptid))
294 return 0;
295
296 return 1;
297 }
298
299 int
300 default_child_has_registers (struct target_ops *ops)
301 {
302 /* Can't read registers from no inferior. */
303 if (ptid_equal (inferior_ptid, null_ptid))
304 return 0;
305
306 return 1;
307 }
308
309 int
310 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
311 {
312 /* If there's no thread selected, then we can't make it run through
313 hoops. */
314 if (ptid_equal (the_ptid, null_ptid))
315 return 0;
316
317 return 1;
318 }
319
320
321 int
322 target_has_all_memory_1 (void)
323 {
324 struct target_ops *t;
325
326 for (t = current_target.beneath; t != NULL; t = t->beneath)
327 if (t->to_has_all_memory (t))
328 return 1;
329
330 return 0;
331 }
332
333 int
334 target_has_memory_1 (void)
335 {
336 struct target_ops *t;
337
338 for (t = current_target.beneath; t != NULL; t = t->beneath)
339 if (t->to_has_memory (t))
340 return 1;
341
342 return 0;
343 }
344
345 int
346 target_has_stack_1 (void)
347 {
348 struct target_ops *t;
349
350 for (t = current_target.beneath; t != NULL; t = t->beneath)
351 if (t->to_has_stack (t))
352 return 1;
353
354 return 0;
355 }
356
357 int
358 target_has_registers_1 (void)
359 {
360 struct target_ops *t;
361
362 for (t = current_target.beneath; t != NULL; t = t->beneath)
363 if (t->to_has_registers (t))
364 return 1;
365
366 return 0;
367 }
368
369 int
370 target_has_execution_1 (ptid_t the_ptid)
371 {
372 struct target_ops *t;
373
374 for (t = current_target.beneath; t != NULL; t = t->beneath)
375 if (t->to_has_execution (t, the_ptid))
376 return 1;
377
378 return 0;
379 }
380
381 int
382 target_has_execution_current (void)
383 {
384 return target_has_execution_1 (inferior_ptid);
385 }
386
387 /* Add a possible target architecture to the list. */
388
389 void
390 add_target (struct target_ops *t)
391 {
392 /* Provide default values for all "must have" methods. */
393 if (t->to_xfer_partial == NULL)
394 t->to_xfer_partial = default_xfer_partial;
395
396 if (t->to_has_all_memory == NULL)
397 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
398
399 if (t->to_has_memory == NULL)
400 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
401
402 if (t->to_has_stack == NULL)
403 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
404
405 if (t->to_has_registers == NULL)
406 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
407
408 if (t->to_has_execution == NULL)
409 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
410
411 if (!target_structs)
412 {
413 target_struct_allocsize = DEFAULT_ALLOCSIZE;
414 target_structs = (struct target_ops **) xmalloc
415 (target_struct_allocsize * sizeof (*target_structs));
416 }
417 if (target_struct_size >= target_struct_allocsize)
418 {
419 target_struct_allocsize *= 2;
420 target_structs = (struct target_ops **)
421 xrealloc ((char *) target_structs,
422 target_struct_allocsize * sizeof (*target_structs));
423 }
424 target_structs[target_struct_size++] = t;
425
426 if (targetlist == NULL)
427 add_prefix_cmd ("target", class_run, target_command, _("\
428 Connect to a target machine or process.\n\
429 The first argument is the type or protocol of the target machine.\n\
430 Remaining arguments are interpreted by the target protocol. For more\n\
431 information on the arguments for a particular protocol, type\n\
432 `help target ' followed by the protocol name."),
433 &targetlist, "target ", 0, &cmdlist);
434 add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc, &targetlist);
435 }
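/* Illustrative sketch (editor's example, not part of this file): a target
   backend typically fills in a target_ops structure and registers it with
   add_target from its _initialize_* routine.  All names below
   (foo_target_ops, foo_open, _initialize_foo_target) are hypothetical.

     static struct target_ops foo_target_ops;

     static void
     foo_open (char *args, int from_tty)
     {
       push_target (&foo_target_ops);
     }

     void
     _initialize_foo_target (void)
     {
       foo_target_ops.to_shortname = "foo";
       foo_target_ops.to_longname = "Example foo target";
       foo_target_ops.to_doc = "Connect to an example foo target.";
       foo_target_ops.to_open = foo_open;
       foo_target_ops.to_stratum = process_stratum;
       foo_target_ops.to_magic = OPS_MAGIC;
       add_target (&foo_target_ops);
     }
*/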
436
437 /* Stub functions */
438
439 void
440 target_ignore (void)
441 {
442 }
443
444 void
445 target_kill (void)
446 {
447 struct target_ops *t;
448
449 for (t = current_target.beneath; t != NULL; t = t->beneath)
450 if (t->to_kill != NULL)
451 {
452 if (targetdebug)
453 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
454
455 t->to_kill (t);
456 return;
457 }
458
459 noprocess ();
460 }
461
462 void
463 target_load (char *arg, int from_tty)
464 {
465 target_dcache_invalidate ();
466 (*current_target.to_load) (arg, from_tty);
467 }
468
469 void
470 target_create_inferior (char *exec_file, char *args,
471 char **env, int from_tty)
472 {
473 struct target_ops *t;
474
475 for (t = current_target.beneath; t != NULL; t = t->beneath)
476 {
477 if (t->to_create_inferior != NULL)
478 {
479 t->to_create_inferior (t, exec_file, args, env, from_tty);
480 if (targetdebug)
481 fprintf_unfiltered (gdb_stdlog,
482 "target_create_inferior (%s, %s, xxx, %d)\n",
483 exec_file, args, from_tty);
484 return;
485 }
486 }
487
488 internal_error (__FILE__, __LINE__,
489 _("could not find a target to create inferior"));
490 }
491
492 void
493 target_terminal_inferior (void)
494 {
495 /* A background resume (``run&'') should leave GDB in control of the
496 terminal. Use target_can_async_p, not target_is_async_p, since at
497 this point the target is not async yet. However, if sync_execution
498 is not set, we know it will become async prior to resume. */
499 if (target_can_async_p () && !sync_execution)
500 return;
501
502 /* If GDB is resuming the inferior in the foreground, install
503 inferior's terminal modes. */
504 (*current_target.to_terminal_inferior) ();
505 }
506
507 static int
508 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
509 struct target_ops *t)
510 {
511 errno = EIO; /* Can't read/write this location. */
512 return 0; /* No bytes handled. */
513 }
514
515 static void
516 tcomplain (void)
517 {
518 error (_("You can't do that when your target is `%s'"),
519 current_target.to_shortname);
520 }
521
522 void
523 noprocess (void)
524 {
525 error (_("You can't do that without a process to debug."));
526 }
527
528 static void
529 default_terminal_info (char *args, int from_tty)
530 {
531 printf_unfiltered (_("No saved terminal information.\n"));
532 }
533
534 /* A default implementation for the to_get_ada_task_ptid target method.
535
536 This function builds the PTID by using both LWP and TID as part of
537 the PTID lwp and tid elements. The pid used is the pid of the
538 inferior_ptid. */
539
540 static ptid_t
541 default_get_ada_task_ptid (long lwp, long tid)
542 {
543 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
544 }
545
546 static enum exec_direction_kind
547 default_execution_direction (void)
548 {
549 if (!target_can_execute_reverse)
550 return EXEC_FORWARD;
551 else if (!target_can_async_p ())
552 return EXEC_FORWARD;
553 else
554 gdb_assert_not_reached ("\
555 to_execution_direction must be implemented for reverse async");
556 }
557
558 /* Go through the target stack from top to bottom, copying over zero
559 entries in current_target, then filling in still empty entries. In
560 effect, we are doing class inheritance through the pushed target
561 vectors.
562
563 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
564 is currently implemented, is that it discards any knowledge of
565 which target an inherited method originally belonged to.
566 Consequently, new target methods should instead explicitly and
567 locally search the target stack for the target that can handle the
568 request. */
569
570 static void
571 update_current_target (void)
572 {
573 struct target_ops *t;
574
575 /* First, reset current's contents. */
576 memset (&current_target, 0, sizeof (current_target));
577
578 #define INHERIT(FIELD, TARGET) \
579 if (!current_target.FIELD) \
580 current_target.FIELD = (TARGET)->FIELD
581
582 for (t = target_stack; t; t = t->beneath)
583 {
584 INHERIT (to_shortname, t);
585 INHERIT (to_longname, t);
586 INHERIT (to_doc, t);
587 /* Do not inherit to_open. */
588 /* Do not inherit to_close. */
589 /* Do not inherit to_attach. */
590 INHERIT (to_post_attach, t);
591 INHERIT (to_attach_no_wait, t);
592 /* Do not inherit to_detach. */
593 /* Do not inherit to_disconnect. */
594 /* Do not inherit to_resume. */
595 /* Do not inherit to_wait. */
596 /* Do not inherit to_fetch_registers. */
597 /* Do not inherit to_store_registers. */
598 INHERIT (to_prepare_to_store, t);
599 INHERIT (deprecated_xfer_memory, t);
600 INHERIT (to_files_info, t);
601 INHERIT (to_insert_breakpoint, t);
602 INHERIT (to_remove_breakpoint, t);
603 INHERIT (to_can_use_hw_breakpoint, t);
604 INHERIT (to_insert_hw_breakpoint, t);
605 INHERIT (to_remove_hw_breakpoint, t);
606 /* Do not inherit to_ranged_break_num_registers. */
607 INHERIT (to_insert_watchpoint, t);
608 INHERIT (to_remove_watchpoint, t);
609 /* Do not inherit to_insert_mask_watchpoint. */
610 /* Do not inherit to_remove_mask_watchpoint. */
611 INHERIT (to_stopped_data_address, t);
612 INHERIT (to_have_steppable_watchpoint, t);
613 INHERIT (to_have_continuable_watchpoint, t);
614 INHERIT (to_stopped_by_watchpoint, t);
615 INHERIT (to_watchpoint_addr_within_range, t);
616 INHERIT (to_region_ok_for_hw_watchpoint, t);
617 INHERIT (to_can_accel_watchpoint_condition, t);
618 /* Do not inherit to_masked_watch_num_registers. */
619 INHERIT (to_terminal_init, t);
620 INHERIT (to_terminal_inferior, t);
621 INHERIT (to_terminal_ours_for_output, t);
622 INHERIT (to_terminal_ours, t);
623 INHERIT (to_terminal_save_ours, t);
624 INHERIT (to_terminal_info, t);
625 /* Do not inherit to_kill. */
626 INHERIT (to_load, t);
627 /* Do not inherit to_create_inferior. */
628 INHERIT (to_post_startup_inferior, t);
629 INHERIT (to_insert_fork_catchpoint, t);
630 INHERIT (to_remove_fork_catchpoint, t);
631 INHERIT (to_insert_vfork_catchpoint, t);
632 INHERIT (to_remove_vfork_catchpoint, t);
633 /* Do not inherit to_follow_fork. */
634 INHERIT (to_insert_exec_catchpoint, t);
635 INHERIT (to_remove_exec_catchpoint, t);
636 INHERIT (to_set_syscall_catchpoint, t);
637 INHERIT (to_has_exited, t);
638 /* Do not inherit to_mourn_inferior. */
639 INHERIT (to_can_run, t);
640 /* Do not inherit to_pass_signals. */
641 /* Do not inherit to_program_signals. */
642 /* Do not inherit to_thread_alive. */
643 /* Do not inherit to_find_new_threads. */
644 /* Do not inherit to_pid_to_str. */
645 INHERIT (to_extra_thread_info, t);
646 INHERIT (to_thread_name, t);
647 INHERIT (to_stop, t);
648 /* Do not inherit to_xfer_partial. */
649 INHERIT (to_rcmd, t);
650 INHERIT (to_pid_to_exec_file, t);
651 INHERIT (to_log_command, t);
652 INHERIT (to_stratum, t);
653 /* Do not inherit to_has_all_memory. */
654 /* Do not inherit to_has_memory. */
655 /* Do not inherit to_has_stack. */
656 /* Do not inherit to_has_registers. */
657 /* Do not inherit to_has_execution. */
658 INHERIT (to_has_thread_control, t);
659 INHERIT (to_can_async_p, t);
660 INHERIT (to_is_async_p, t);
661 INHERIT (to_async, t);
662 INHERIT (to_find_memory_regions, t);
663 INHERIT (to_make_corefile_notes, t);
664 INHERIT (to_get_bookmark, t);
665 INHERIT (to_goto_bookmark, t);
666 /* Do not inherit to_get_thread_local_address. */
667 INHERIT (to_can_execute_reverse, t);
668 INHERIT (to_execution_direction, t);
669 INHERIT (to_thread_architecture, t);
670 /* Do not inherit to_read_description. */
671 INHERIT (to_get_ada_task_ptid, t);
672 /* Do not inherit to_search_memory. */
673 INHERIT (to_supports_multi_process, t);
674 INHERIT (to_supports_enable_disable_tracepoint, t);
675 INHERIT (to_supports_string_tracing, t);
676 INHERIT (to_trace_init, t);
677 INHERIT (to_download_tracepoint, t);
678 INHERIT (to_can_download_tracepoint, t);
679 INHERIT (to_download_trace_state_variable, t);
680 INHERIT (to_enable_tracepoint, t);
681 INHERIT (to_disable_tracepoint, t);
682 INHERIT (to_trace_set_readonly_regions, t);
683 INHERIT (to_trace_start, t);
684 INHERIT (to_get_trace_status, t);
685 INHERIT (to_get_tracepoint_status, t);
686 INHERIT (to_trace_stop, t);
687 INHERIT (to_trace_find, t);
688 INHERIT (to_get_trace_state_variable_value, t);
689 INHERIT (to_save_trace_data, t);
690 INHERIT (to_upload_tracepoints, t);
691 INHERIT (to_upload_trace_state_variables, t);
692 INHERIT (to_get_raw_trace_data, t);
693 INHERIT (to_get_min_fast_tracepoint_insn_len, t);
694 INHERIT (to_set_disconnected_tracing, t);
695 INHERIT (to_set_circular_trace_buffer, t);
696 INHERIT (to_set_trace_notes, t);
697 INHERIT (to_get_tib_address, t);
698 INHERIT (to_set_permissions, t);
699 INHERIT (to_static_tracepoint_marker_at, t);
700 INHERIT (to_static_tracepoint_markers_by_strid, t);
701 INHERIT (to_traceframe_info, t);
702 INHERIT (to_use_agent, t);
703 INHERIT (to_can_use_agent, t);
704 INHERIT (to_magic, t);
705 INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
706 INHERIT (to_can_run_breakpoint_commands, t);
707 /* Do not inherit to_memory_map. */
708 /* Do not inherit to_flash_erase. */
709 /* Do not inherit to_flash_done. */
710 }
711 #undef INHERIT
712
713 /* Clean up a target struct so it no longer has any zero pointers in
714 it. Some entries are defaulted to a method that prints an error,
715 others are hard-wired to a standard recursive default. */
716
717 #define de_fault(field, value) \
718 if (!current_target.field) \
719 current_target.field = value
720
721 de_fault (to_open,
722 (void (*) (char *, int))
723 tcomplain);
724 de_fault (to_close,
725 (void (*) (int))
726 target_ignore);
727 de_fault (to_post_attach,
728 (void (*) (int))
729 target_ignore);
730 de_fault (to_prepare_to_store,
731 (void (*) (struct regcache *))
732 noprocess);
733 de_fault (deprecated_xfer_memory,
734 (int (*) (CORE_ADDR, gdb_byte *, int, int,
735 struct mem_attrib *, struct target_ops *))
736 nomemory);
737 de_fault (to_files_info,
738 (void (*) (struct target_ops *))
739 target_ignore);
740 de_fault (to_insert_breakpoint,
741 memory_insert_breakpoint);
742 de_fault (to_remove_breakpoint,
743 memory_remove_breakpoint);
744 de_fault (to_can_use_hw_breakpoint,
745 (int (*) (int, int, int))
746 return_zero);
747 de_fault (to_insert_hw_breakpoint,
748 (int (*) (struct gdbarch *, struct bp_target_info *))
749 return_minus_one);
750 de_fault (to_remove_hw_breakpoint,
751 (int (*) (struct gdbarch *, struct bp_target_info *))
752 return_minus_one);
753 de_fault (to_insert_watchpoint,
754 (int (*) (CORE_ADDR, int, int, struct expression *))
755 return_minus_one);
756 de_fault (to_remove_watchpoint,
757 (int (*) (CORE_ADDR, int, int, struct expression *))
758 return_minus_one);
759 de_fault (to_stopped_by_watchpoint,
760 (int (*) (void))
761 return_zero);
762 de_fault (to_stopped_data_address,
763 (int (*) (struct target_ops *, CORE_ADDR *))
764 return_zero);
765 de_fault (to_watchpoint_addr_within_range,
766 default_watchpoint_addr_within_range);
767 de_fault (to_region_ok_for_hw_watchpoint,
768 default_region_ok_for_hw_watchpoint);
769 de_fault (to_can_accel_watchpoint_condition,
770 (int (*) (CORE_ADDR, int, int, struct expression *))
771 return_zero);
772 de_fault (to_terminal_init,
773 (void (*) (void))
774 target_ignore);
775 de_fault (to_terminal_inferior,
776 (void (*) (void))
777 target_ignore);
778 de_fault (to_terminal_ours_for_output,
779 (void (*) (void))
780 target_ignore);
781 de_fault (to_terminal_ours,
782 (void (*) (void))
783 target_ignore);
784 de_fault (to_terminal_save_ours,
785 (void (*) (void))
786 target_ignore);
787 de_fault (to_terminal_info,
788 default_terminal_info);
789 de_fault (to_load,
790 (void (*) (char *, int))
791 tcomplain);
792 de_fault (to_post_startup_inferior,
793 (void (*) (ptid_t))
794 target_ignore);
795 de_fault (to_insert_fork_catchpoint,
796 (int (*) (int))
797 return_one);
798 de_fault (to_remove_fork_catchpoint,
799 (int (*) (int))
800 return_one);
801 de_fault (to_insert_vfork_catchpoint,
802 (int (*) (int))
803 return_one);
804 de_fault (to_remove_vfork_catchpoint,
805 (int (*) (int))
806 return_one);
807 de_fault (to_insert_exec_catchpoint,
808 (int (*) (int))
809 return_one);
810 de_fault (to_remove_exec_catchpoint,
811 (int (*) (int))
812 return_one);
813 de_fault (to_set_syscall_catchpoint,
814 (int (*) (int, int, int, int, int *))
815 return_one);
816 de_fault (to_has_exited,
817 (int (*) (int, int, int *))
818 return_zero);
819 de_fault (to_can_run,
820 return_zero);
821 de_fault (to_extra_thread_info,
822 (char *(*) (struct thread_info *))
823 return_zero);
824 de_fault (to_thread_name,
825 (char *(*) (struct thread_info *))
826 return_zero);
827 de_fault (to_stop,
828 (void (*) (ptid_t))
829 target_ignore);
830 current_target.to_xfer_partial = current_xfer_partial;
831 de_fault (to_rcmd,
832 (void (*) (char *, struct ui_file *))
833 tcomplain);
834 de_fault (to_pid_to_exec_file,
835 (char *(*) (int))
836 return_zero);
837 de_fault (to_async,
838 (void (*) (void (*) (enum inferior_event_type, void*), void*))
839 tcomplain);
840 de_fault (to_thread_architecture,
841 default_thread_architecture);
842 current_target.to_read_description = NULL;
843 de_fault (to_get_ada_task_ptid,
844 (ptid_t (*) (long, long))
845 default_get_ada_task_ptid);
846 de_fault (to_supports_multi_process,
847 (int (*) (void))
848 return_zero);
849 de_fault (to_supports_enable_disable_tracepoint,
850 (int (*) (void))
851 return_zero);
852 de_fault (to_supports_string_tracing,
853 (int (*) (void))
854 return_zero);
855 de_fault (to_trace_init,
856 (void (*) (void))
857 tcomplain);
858 de_fault (to_download_tracepoint,
859 (void (*) (struct bp_location *))
860 tcomplain);
861 de_fault (to_can_download_tracepoint,
862 (int (*) (void))
863 return_zero);
864 de_fault (to_download_trace_state_variable,
865 (void (*) (struct trace_state_variable *))
866 tcomplain);
867 de_fault (to_enable_tracepoint,
868 (void (*) (struct bp_location *))
869 tcomplain);
870 de_fault (to_disable_tracepoint,
871 (void (*) (struct bp_location *))
872 tcomplain);
873 de_fault (to_trace_set_readonly_regions,
874 (void (*) (void))
875 tcomplain);
876 de_fault (to_trace_start,
877 (void (*) (void))
878 tcomplain);
879 de_fault (to_get_trace_status,
880 (int (*) (struct trace_status *))
881 return_minus_one);
882 de_fault (to_get_tracepoint_status,
883 (void (*) (struct breakpoint *, struct uploaded_tp *))
884 tcomplain);
885 de_fault (to_trace_stop,
886 (void (*) (void))
887 tcomplain);
888 de_fault (to_trace_find,
889 (int (*) (enum trace_find_type, int, ULONGEST, ULONGEST, int *))
890 return_minus_one);
891 de_fault (to_get_trace_state_variable_value,
892 (int (*) (int, LONGEST *))
893 return_zero);
894 de_fault (to_save_trace_data,
895 (int (*) (const char *))
896 tcomplain);
897 de_fault (to_upload_tracepoints,
898 (int (*) (struct uploaded_tp **))
899 return_zero);
900 de_fault (to_upload_trace_state_variables,
901 (int (*) (struct uploaded_tsv **))
902 return_zero);
903 de_fault (to_get_raw_trace_data,
904 (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
905 tcomplain);
906 de_fault (to_get_min_fast_tracepoint_insn_len,
907 (int (*) (void))
908 return_minus_one);
909 de_fault (to_set_disconnected_tracing,
910 (void (*) (int))
911 target_ignore);
912 de_fault (to_set_circular_trace_buffer,
913 (void (*) (int))
914 target_ignore);
915 de_fault (to_set_trace_notes,
916 (int (*) (char *, char *, char *))
917 return_zero);
918 de_fault (to_get_tib_address,
919 (int (*) (ptid_t, CORE_ADDR *))
920 tcomplain);
921 de_fault (to_set_permissions,
922 (void (*) (void))
923 target_ignore);
924 de_fault (to_static_tracepoint_marker_at,
925 (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
926 return_zero);
927 de_fault (to_static_tracepoint_markers_by_strid,
928 (VEC(static_tracepoint_marker_p) * (*) (const char *))
929 tcomplain);
930 de_fault (to_traceframe_info,
931 (struct traceframe_info * (*) (void))
932 tcomplain);
933 de_fault (to_supports_evaluation_of_breakpoint_conditions,
934 (int (*) (void))
935 return_zero);
936 de_fault (to_can_run_breakpoint_commands,
937 (int (*) (void))
938 return_zero);
939 de_fault (to_use_agent,
940 (int (*) (int))
941 tcomplain);
942 de_fault (to_can_use_agent,
943 (int (*) (void))
944 return_zero);
945 de_fault (to_execution_direction, default_execution_direction);
946
947 #undef de_fault
948
949 /* Finally, position the target-stack beneath the squashed
950 "current_target". That way code looking for a non-inherited
951 target method can quickly and simply find it. */
952 current_target.beneath = target_stack;
953
954 if (targetdebug)
955 setup_target_debug ();
956 }
957
958 /* Push a new target type into the stack of the existing target accessors,
959 possibly superseding some of the existing accessors.
960
961 Rather than allow an empty stack, we always have the dummy target at
962 the bottom stratum, so we can call the function vectors without
963 checking them. */
964
965 void
966 push_target (struct target_ops *t)
967 {
968 struct target_ops **cur;
969
970 /* Check magic number. If wrong, it probably means someone changed
971 the struct definition, but not all the places that initialize one. */
972 if (t->to_magic != OPS_MAGIC)
973 {
974 fprintf_unfiltered (gdb_stderr,
975 "Magic number of %s target struct wrong\n",
976 t->to_shortname);
977 internal_error (__FILE__, __LINE__,
978 _("failed internal consistency check"));
979 }
980
981 /* Find the proper stratum to install this target in. */
982 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
983 {
984 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
985 break;
986 }
987
988 /* If there are already targets at this stratum, remove them. */
989 /* FIXME: cagney/2003-10-15: I think this should be popping all
990 targets to CUR, and not just those at this stratum level. */
991 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
992 {
993 /* There's already something at this stratum level. Close it,
994 and un-hook it from the stack. */
995 struct target_ops *tmp = (*cur);
996
997 (*cur) = (*cur)->beneath;
998 tmp->beneath = NULL;
999 target_close (tmp, 0);
1000 }
1001
1002 /* We have removed all targets in our stratum; now add the new one. */
1003 t->beneath = (*cur);
1004 (*cur) = t;
1005
1006 update_current_target ();
1007 }
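/* Illustrative note (editor's example, not part of this file): after a
   typical "target remote" session the stack holds, from top to bottom, a
   process_stratum target (the remote target), a file_stratum target (the
   exec target), and the dummy target at dummy_stratum.  Pushing another
   process_stratum target replaces the one already at that stratum, per
   the loop above. */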
1008
1009 /* Remove a target_ops vector from the stack, wherever it may be.
1010 Return how many times it was removed (0 or 1). */
1011
1012 int
1013 unpush_target (struct target_ops *t)
1014 {
1015 struct target_ops **cur;
1016 struct target_ops *tmp;
1017
1018 if (t->to_stratum == dummy_stratum)
1019 internal_error (__FILE__, __LINE__,
1020 _("Attempt to unpush the dummy target"));
1021
1022 /* Look for the specified target. Note that we assume that a target
1023 can only occur once in the target stack. */
1024
1025 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1026 {
1027 if ((*cur) == t)
1028 break;
1029 }
1030
1031 /* If we don't find target_ops, quit. Only open targets should be
1032 closed. */
1033 if ((*cur) == NULL)
1034 return 0;
1035
1036 /* Unchain the target. */
1037 tmp = (*cur);
1038 (*cur) = (*cur)->beneath;
1039 tmp->beneath = NULL;
1040
1041 update_current_target ();
1042
1043 /* Finally close the target. Note we do this after unchaining, so
1044 any target method calls from within the target_close
1045 implementation don't end up in T anymore. */
1046 target_close (t, 0);
1047
1048 return 1;
1049 }
1050
1051 void
1052 pop_target (void)
1053 {
1054 target_close (target_stack, 0); /* Let it clean up. */
1055 if (unpush_target (target_stack) == 1)
1056 return;
1057
1058 fprintf_unfiltered (gdb_stderr,
1059 "pop_target couldn't find target %s\n",
1060 current_target.to_shortname);
1061 internal_error (__FILE__, __LINE__,
1062 _("failed internal consistency check"));
1063 }
1064
1065 void
1066 pop_all_targets_above (enum strata above_stratum, int quitting)
1067 {
1068 while ((int) (current_target.to_stratum) > (int) above_stratum)
1069 {
1070 target_close (target_stack, quitting);
1071 if (!unpush_target (target_stack))
1072 {
1073 fprintf_unfiltered (gdb_stderr,
1074 "pop_all_targets couldn't find target %s\n",
1075 target_stack->to_shortname);
1076 internal_error (__FILE__, __LINE__,
1077 _("failed internal consistency check"));
1078 break;
1079 }
1080 }
1081 }
1082
1083 void
1084 pop_all_targets (int quitting)
1085 {
1086 pop_all_targets_above (dummy_stratum, quitting);
1087 }
1088
1089 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1090
1091 int
1092 target_is_pushed (struct target_ops *t)
1093 {
1094 struct target_ops **cur;
1095
1096 /* Check magic number. If wrong, it probably means someone changed
1097 the struct definition, but not all the places that initialize one. */
1098 if (t->to_magic != OPS_MAGIC)
1099 {
1100 fprintf_unfiltered (gdb_stderr,
1101 "Magic number of %s target struct wrong\n",
1102 t->to_shortname);
1103 internal_error (__FILE__, __LINE__,
1104 _("failed internal consistency check"));
1105 }
1106
1107 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1108 if (*cur == t)
1109 return 1;
1110
1111 return 0;
1112 }
1113
1114 /* Using the objfile specified in OBJFILE, find the address for the
1115 current thread's thread-local storage with offset OFFSET. */
1116 CORE_ADDR
1117 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1118 {
1119 volatile CORE_ADDR addr = 0;
1120 struct target_ops *target;
1121
1122 for (target = current_target.beneath;
1123 target != NULL;
1124 target = target->beneath)
1125 {
1126 if (target->to_get_thread_local_address != NULL)
1127 break;
1128 }
1129
1130 if (target != NULL
1131 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
1132 {
1133 ptid_t ptid = inferior_ptid;
1134 volatile struct gdb_exception ex;
1135
1136 TRY_CATCH (ex, RETURN_MASK_ALL)
1137 {
1138 CORE_ADDR lm_addr;
1139
1140 /* Fetch the load module address for this objfile. */
1141 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
1142 objfile);
1143 /* If it's 0, throw the appropriate exception. */
1144 if (lm_addr == 0)
1145 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1146 _("TLS load module not found"));
1147
1148 addr = target->to_get_thread_local_address (target, ptid,
1149 lm_addr, offset);
1150 }
1151 /* If an error occurred, print TLS related messages here. Otherwise,
1152 throw the error to some higher catcher. */
1153 if (ex.reason < 0)
1154 {
1155 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1156
1157 switch (ex.error)
1158 {
1159 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1160 error (_("Cannot find thread-local variables "
1161 "in this thread library."));
1162 break;
1163 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1164 if (objfile_is_library)
1165 error (_("Cannot find shared library `%s' in dynamic"
1166 " linker's load module list"), objfile->name);
1167 else
1168 error (_("Cannot find executable file `%s' in dynamic"
1169 " linker's load module list"), objfile->name);
1170 break;
1171 case TLS_NOT_ALLOCATED_YET_ERROR:
1172 if (objfile_is_library)
1173 error (_("The inferior has not yet allocated storage for"
1174 " thread-local variables in\n"
1175 "the shared library `%s'\n"
1176 "for %s"),
1177 objfile->name, target_pid_to_str (ptid));
1178 else
1179 error (_("The inferior has not yet allocated storage for"
1180 " thread-local variables in\n"
1181 "the executable `%s'\n"
1182 "for %s"),
1183 objfile->name, target_pid_to_str (ptid));
1184 break;
1185 case TLS_GENERIC_ERROR:
1186 if (objfile_is_library)
1187 error (_("Cannot find thread-local storage for %s, "
1188 "shared library %s:\n%s"),
1189 target_pid_to_str (ptid),
1190 objfile->name, ex.message);
1191 else
1192 error (_("Cannot find thread-local storage for %s, "
1193 "executable file %s:\n%s"),
1194 target_pid_to_str (ptid),
1195 objfile->name, ex.message);
1196 break;
1197 default:
1198 throw_exception (ex);
1199 break;
1200 }
1201 }
1202 }
1203 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1204 TLS is an ABI-specific thing. But we don't do that yet. */
1205 else
1206 error (_("Cannot find thread-local variables on this target"));
1207
1208 return addr;
1209 }
1210
1211 #undef MIN
1212 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1213
1214 /* target_read_string -- read a null-terminated string, up to LEN bytes,
1215 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1216 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1217 is responsible for freeing it. Return the number of bytes successfully
1218 read. */
1219
1220 int
1221 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1222 {
1223 int tlen, origlen, offset, i;
1224 gdb_byte buf[4];
1225 int errcode = 0;
1226 char *buffer;
1227 int buffer_allocated;
1228 char *bufptr;
1229 unsigned int nbytes_read = 0;
1230
1231 gdb_assert (string);
1232
1233 /* Small for testing. */
1234 buffer_allocated = 4;
1235 buffer = xmalloc (buffer_allocated);
1236 bufptr = buffer;
1237
1238 origlen = len;
1239
1240 while (len > 0)
1241 {
1242 tlen = MIN (len, 4 - (memaddr & 3));
1243 offset = memaddr & 3;
1244
1245 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1246 if (errcode != 0)
1247 {
1248 /* The transfer request might have crossed the boundary to an
1249 unallocated region of memory. Retry the transfer, requesting
1250 a single byte. */
1251 tlen = 1;
1252 offset = 0;
1253 errcode = target_read_memory (memaddr, buf, 1);
1254 if (errcode != 0)
1255 goto done;
1256 }
1257
1258 if (bufptr - buffer + tlen > buffer_allocated)
1259 {
1260 unsigned int bytes;
1261
1262 bytes = bufptr - buffer;
1263 buffer_allocated *= 2;
1264 buffer = xrealloc (buffer, buffer_allocated);
1265 bufptr = buffer + bytes;
1266 }
1267
1268 for (i = 0; i < tlen; i++)
1269 {
1270 *bufptr++ = buf[i + offset];
1271 if (buf[i + offset] == '\000')
1272 {
1273 nbytes_read += i + 1;
1274 goto done;
1275 }
1276 }
1277
1278 memaddr += tlen;
1279 len -= tlen;
1280 nbytes_read += tlen;
1281 }
1282 done:
1283 *string = buffer;
1284 if (errnop != NULL)
1285 *errnop = errcode;
1286 return nbytes_read;
1287 }
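/* Illustrative usage sketch (editor's example, not part of this file),
   based on the contract documented above: the string is returned in
   malloc'd storage that the caller must free, and *ERRNOP receives the
   errno code or 0 on success.  ADDR is a hypothetical target address
   supplied by the caller.

     char *str;
     int err;
     int nbytes = target_read_string (addr, &str, 256, &err);

     if (err == 0)
       printf_filtered ("%d bytes: %s\n", nbytes, str);
     xfree (str);
*/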
1288
1289 struct target_section_table *
1290 target_get_section_table (struct target_ops *target)
1291 {
1292 struct target_ops *t;
1293
1294 if (targetdebug)
1295 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1296
1297 for (t = target; t != NULL; t = t->beneath)
1298 if (t->to_get_section_table != NULL)
1299 return (*t->to_get_section_table) (t);
1300
1301 return NULL;
1302 }
1303
1304 /* Find a section containing ADDR. */
1305
1306 struct target_section *
1307 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1308 {
1309 struct target_section_table *table = target_get_section_table (target);
1310 struct target_section *secp;
1311
1312 if (table == NULL)
1313 return NULL;
1314
1315 for (secp = table->sections; secp < table->sections_end; secp++)
1316 {
1317 if (addr >= secp->addr && addr < secp->endaddr)
1318 return secp;
1319 }
1320 return NULL;
1321 }
1322
1323 /* Read memory from the live target, even if currently inspecting a
1324 traceframe. The return value is the same as that of target_read. */
1325
1326 static LONGEST
1327 target_read_live_memory (enum target_object object,
1328 ULONGEST memaddr, gdb_byte *myaddr, LONGEST len)
1329 {
1330 int ret;
1331 struct cleanup *cleanup;
1332
1333 /* Switch momentarily out of tfind mode so as to access live memory.
1334 Note that this must not clear global state, such as the frame
1335 cache, which must still remain valid for the previous traceframe.
1336 We may be _building_ the frame cache at this point. */
1337 cleanup = make_cleanup_restore_traceframe_number ();
1338 set_traceframe_number (-1);
1339
1340 ret = target_read (current_target.beneath, object, NULL,
1341 myaddr, memaddr, len);
1342
1343 do_cleanups (cleanup);
1344 return ret;
1345 }
1346
1347 /* Using the set of read-only target sections of OPS, read live
1348 read-only memory. Note that the actual reads start from the
1349 top-most target again.
1350
1351 For interface/parameters/return description see target.h,
1352 to_xfer_partial. */
1353
1354 static LONGEST
1355 memory_xfer_live_readonly_partial (struct target_ops *ops,
1356 enum target_object object,
1357 gdb_byte *readbuf, ULONGEST memaddr,
1358 LONGEST len)
1359 {
1360 struct target_section *secp;
1361 struct target_section_table *table;
1362
1363 secp = target_section_by_addr (ops, memaddr);
1364 if (secp != NULL
1365 && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
1366 & SEC_READONLY))
1367 {
1368 struct target_section *p;
1369 ULONGEST memend = memaddr + len;
1370
1371 table = target_get_section_table (ops);
1372
1373 for (p = table->sections; p < table->sections_end; p++)
1374 {
1375 if (memaddr >= p->addr)
1376 {
1377 if (memend <= p->endaddr)
1378 {
1379 /* Entire transfer is within this section. */
1380 return target_read_live_memory (object, memaddr,
1381 readbuf, len);
1382 }
1383 else if (memaddr >= p->endaddr)
1384 {
1385 /* This section ends before the transfer starts. */
1386 continue;
1387 }
1388 else
1389 {
1390 /* This section overlaps the transfer. Just do half. */
1391 len = p->endaddr - memaddr;
1392 return target_read_live_memory (object, memaddr,
1393 readbuf, len);
1394 }
1395 }
1396 }
1397 }
1398
1399 return 0;
1400 }
1401
1402 /* Perform a partial memory transfer.
1403 For docs see target.h, to_xfer_partial. */
1404
1405 static LONGEST
1406 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1407 void *readbuf, const void *writebuf, ULONGEST memaddr,
1408 LONGEST len)
1409 {
1410 LONGEST res;
1411 int reg_len;
1412 struct mem_region *region;
1413 struct inferior *inf;
1414
1415 /* For accesses to unmapped overlay sections, read directly from
1416 files. Must do this first, as MEMADDR may need adjustment. */
1417 if (readbuf != NULL && overlay_debugging)
1418 {
1419 struct obj_section *section = find_pc_overlay (memaddr);
1420
1421 if (pc_in_unmapped_range (memaddr, section))
1422 {
1423 struct target_section_table *table
1424 = target_get_section_table (ops);
1425 const char *section_name = section->the_bfd_section->name;
1426
1427 memaddr = overlay_mapped_address (memaddr, section);
1428 return section_table_xfer_memory_partial (readbuf, writebuf,
1429 memaddr, len,
1430 table->sections,
1431 table->sections_end,
1432 section_name);
1433 }
1434 }
1435
1436 /* Try the executable files, if "trust-readonly-sections" is set. */
1437 if (readbuf != NULL && trust_readonly)
1438 {
1439 struct target_section *secp;
1440 struct target_section_table *table;
1441
1442 secp = target_section_by_addr (ops, memaddr);
1443 if (secp != NULL
1444 && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
1445 & SEC_READONLY))
1446 {
1447 table = target_get_section_table (ops);
1448 return section_table_xfer_memory_partial (readbuf, writebuf,
1449 memaddr, len,
1450 table->sections,
1451 table->sections_end,
1452 NULL);
1453 }
1454 }
1455
1456 /* If reading unavailable memory in the context of traceframes, and
1457 this address falls within a read-only section, fall back to
1458 reading from live memory. */
1459 if (readbuf != NULL && get_traceframe_number () != -1)
1460 {
1461 VEC(mem_range_s) *available;
1462
1463 /* If we fail to get the set of available memory, then the
1464 target does not support querying traceframe info, and so we
1465 attempt reading from the traceframe anyway (assuming the
1466 target implements the old QTro packet then). */
1467 if (traceframe_available_memory (&available, memaddr, len))
1468 {
1469 struct cleanup *old_chain;
1470
1471 old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);
1472
1473 if (VEC_empty (mem_range_s, available)
1474 || VEC_index (mem_range_s, available, 0)->start != memaddr)
1475 {
1476 /* Don't read into the traceframe's available
1477 memory. */
1478 if (!VEC_empty (mem_range_s, available))
1479 {
1480 LONGEST oldlen = len;
1481
1482 len = VEC_index (mem_range_s, available, 0)->start - memaddr;
1483 gdb_assert (len <= oldlen);
1484 }
1485
1486 do_cleanups (old_chain);
1487
1488 /* This goes through the topmost target again. */
1489 res = memory_xfer_live_readonly_partial (ops, object,
1490 readbuf, memaddr, len);
1491 if (res > 0)
1492 return res;
1493
1494 /* No use trying further, we know some memory starting
1495 at MEMADDR isn't available. */
1496 return -1;
1497 }
1498
1499 /* Don't try to read more than is available, in
1500 case the target implements the deprecated QTro packet to
1501 cater for older GDBs (the target's knowledge of read-only
1502 sections may be outdated by now). */
1503 len = VEC_index (mem_range_s, available, 0)->length;
1504
1505 do_cleanups (old_chain);
1506 }
1507 }
1508
1509 /* Try GDB's internal data cache. */
1510 region = lookup_mem_region (memaddr);
1511 /* region->hi == 0 means there's no upper bound. */
1512 if (memaddr + len < region->hi || region->hi == 0)
1513 reg_len = len;
1514 else
1515 reg_len = region->hi - memaddr;
1516
1517 switch (region->attrib.mode)
1518 {
1519 case MEM_RO:
1520 if (writebuf != NULL)
1521 return -1;
1522 break;
1523
1524 case MEM_WO:
1525 if (readbuf != NULL)
1526 return -1;
1527 break;
1528
1529 case MEM_FLASH:
1530 /* We only support writing to flash during "load" for now. */
1531 if (writebuf != NULL)
1532 error (_("Writing to flash memory forbidden in this context"));
1533 break;
1534
1535 case MEM_NONE:
1536 return -1;
1537 }
1538
1539 if (!ptid_equal (inferior_ptid, null_ptid))
1540 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1541 else
1542 inf = NULL;
1543
1544 if (inf != NULL
1545 /* The dcache reads whole cache lines; that doesn't play well
1546 with reading from a trace buffer, because reading outside of
1547 the collected memory range fails. */
1548 && get_traceframe_number () == -1
1549 && (region->attrib.cache
1550 || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
1551 {
1552 if (readbuf != NULL)
1553 res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
1554 reg_len, 0);
1555 else
1556 /* FIXME drow/2006-08-09: If we're going to preserve const
1557 correctness, dcache_xfer_memory should take readbuf and
1558 writebuf. */
1559 res = dcache_xfer_memory (ops, target_dcache, memaddr,
1560 (void *) writebuf,
1561 reg_len, 1);
1562 if (res <= 0)
1563 return -1;
1564 else
1565 return res;
1566 }
1567
1568 /* If none of those methods found the memory we wanted, fall back
1569 to a target partial transfer. Normally a single call to
1570 to_xfer_partial is enough; if it doesn't recognize an object
1571 it will call the to_xfer_partial of the next target down.
1572 But for memory this won't do. Memory is the only target
1573 object which can be read from more than one valid target.
1574 A core file, for instance, could have some of memory but
1575 delegate other bits to the target below it. So, we must
1576 manually try all targets. */
1577
1578 do
1579 {
1580 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1581 readbuf, writebuf, memaddr, reg_len);
1582 if (res > 0)
1583 break;
1584
1585 /* We want to continue past core files to executables, but not
1586 past a running target's memory. */
1587 if (ops->to_has_all_memory (ops))
1588 break;
1589
1590 ops = ops->beneath;
1591 }
1592 while (ops != NULL);
1593
1594 /* Make sure the cache gets updated no matter what, if we are writing
1595 to the stack: even if this write is not tagged as a stack write, we
1596 still need to update the cache. */
1597
1598 if (res > 0
1599 && inf != NULL
1600 && writebuf != NULL
1601 && !region->attrib.cache
1602 && stack_cache_enabled_p
1603 && object != TARGET_OBJECT_STACK_MEMORY)
1604 {
1605 dcache_update (target_dcache, memaddr, (void *) writebuf, res);
1606 }
1607
1608 /* If we still haven't got anything, return the last error. We
1609 give up. */
1610 return res;
1611 }
1612
1613 /* Perform a partial memory transfer. For docs see target.h,
1614 to_xfer_partial. */
1615
1616 static LONGEST
1617 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1618 void *readbuf, const void *writebuf, ULONGEST memaddr,
1619 LONGEST len)
1620 {
1621 int res;
1622
1623 /* Zero length requests are ok and require no work. */
1624 if (len == 0)
1625 return 0;
1626
1627 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1628 breakpoint insns, thus hiding out from higher layers whether
1629 there are software breakpoints inserted in the code stream. */
1630 if (readbuf != NULL)
1631 {
1632 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len);
1633
1634 if (res > 0 && !show_memory_breakpoints)
1635 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1636 }
1637 else
1638 {
1639 void *buf;
1640 struct cleanup *old_chain;
1641
1642 buf = xmalloc (len);
1643 old_chain = make_cleanup (xfree, buf);
1644 memcpy (buf, writebuf, len);
1645
1646 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1647 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len);
1648
1649 do_cleanups (old_chain);
1650 }
1651
1652 return res;
1653 }
1654
1655 static void
1656 restore_show_memory_breakpoints (void *arg)
1657 {
1658 show_memory_breakpoints = (uintptr_t) arg;
1659 }
1660
1661 struct cleanup *
1662 make_show_memory_breakpoints_cleanup (int show)
1663 {
1664 int current = show_memory_breakpoints;
1665
1666 show_memory_breakpoints = show;
1667 return make_cleanup (restore_show_memory_breakpoints,
1668 (void *) (uintptr_t) current);
1669 }
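/* Illustrative usage sketch (editor's example, not part of this file):
   temporarily show memory as it actually is on the target, including any
   breakpoint instructions GDB has inserted, then restore the previous
   setting through the returned cleanup.

     struct cleanup *old_chain = make_show_memory_breakpoints_cleanup (1);

     reads performed here are not masked by breakpoint shadow copies

     do_cleanups (old_chain);
*/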
1670
1671 /* For docs see target.h, to_xfer_partial. */
1672
1673 static LONGEST
1674 target_xfer_partial (struct target_ops *ops,
1675 enum target_object object, const char *annex,
1676 void *readbuf, const void *writebuf,
1677 ULONGEST offset, LONGEST len)
1678 {
1679 LONGEST retval;
1680
1681 gdb_assert (ops->to_xfer_partial != NULL);
1682
1683 if (writebuf && !may_write_memory)
1684 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1685 core_addr_to_string_nz (offset), plongest (len));
1686
1687 /* If this is a memory transfer, let the memory-specific code
1688 have a look at it instead. Memory transfers are more
1689 complicated. */
1690 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
1691 retval = memory_xfer_partial (ops, object, readbuf,
1692 writebuf, offset, len);
1693 else
1694 {
1695 enum target_object raw_object = object;
1696
1697 /* If this is a raw memory transfer, request the normal
1698 memory object from other layers. */
1699 if (raw_object == TARGET_OBJECT_RAW_MEMORY)
1700 raw_object = TARGET_OBJECT_MEMORY;
1701
1702 retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
1703 writebuf, offset, len);
1704 }
1705
1706 if (targetdebug)
1707 {
1708 const unsigned char *myaddr = NULL;
1709
1710 fprintf_unfiltered (gdb_stdlog,
1711 "%s:target_xfer_partial "
1712 "(%d, %s, %s, %s, %s, %s) = %s",
1713 ops->to_shortname,
1714 (int) object,
1715 (annex ? annex : "(null)"),
1716 host_address_to_string (readbuf),
1717 host_address_to_string (writebuf),
1718 core_addr_to_string_nz (offset),
1719 plongest (len), plongest (retval));
1720
1721 if (readbuf)
1722 myaddr = readbuf;
1723 if (writebuf)
1724 myaddr = writebuf;
1725 if (retval > 0 && myaddr != NULL)
1726 {
1727 int i;
1728
1729 fputs_unfiltered (", bytes =", gdb_stdlog);
1730 for (i = 0; i < retval; i++)
1731 {
1732 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1733 {
1734 if (targetdebug < 2 && i > 0)
1735 {
1736 fprintf_unfiltered (gdb_stdlog, " ...");
1737 break;
1738 }
1739 fprintf_unfiltered (gdb_stdlog, "\n");
1740 }
1741
1742 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1743 }
1744 }
1745
1746 fputc_unfiltered ('\n', gdb_stdlog);
1747 }
1748 return retval;
1749 }
1750
1751 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1752 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1753 if any error occurs.
1754
1755 If an error occurs, no guarantee is made about the contents of the data at
1756 MYADDR. In particular, the caller should not depend upon partial reads
1757 filling the buffer with good data. There is no way for the caller to know
1758 how much good data might have been transferred anyway. Callers that can
1759 deal with partial reads should call target_read (which will retry until
1760 it makes no progress, and then return how much was transferred). */
1761
1762 int
1763 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1764 {
1765 /* Dispatch to the topmost target, not the flattened current_target.
1766 Memory accesses check target->to_has_(all_)memory, and the
1767 flattened target doesn't inherit those. */
1768 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1769 myaddr, memaddr, len) == len)
1770 return 0;
1771 else
1772 return EIO;
1773 }
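/* Illustrative usage sketch (editor's example, not part of this file):
   read a word of target memory and check the 0/errno return convention
   documented above.  ADDR is a hypothetical CORE_ADDR supplied by the
   caller.

     gdb_byte buf[4];

     if (target_read_memory (addr, buf, sizeof buf) != 0)
       error (_("Cannot read memory at %s"),
	      paddress (target_gdbarch (), addr));
*/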
1774
1775 /* Like target_read_memory, but specify explicitly that this is a read from
1776 the target's stack. This may trigger different cache behavior. */
1777
1778 int
1779 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1780 {
1781 /* Dispatch to the topmost target, not the flattened current_target.
1782 Memory accesses check target->to_has_(all_)memory, and the
1783 flattened target doesn't inherit those. */
1784
1785 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1786 myaddr, memaddr, len) == len)
1787 return 0;
1788 else
1789 return EIO;
1790 }
1791
1792 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1793 Returns either 0 for success or an errno value if any error occurs.
1794 If an error occurs, no guarantee is made about how much data got written.
1795 Callers that can deal with partial writes should call target_write. */
1796
1797 int
1798 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1799 {
1800 /* Dispatch to the topmost target, not the flattened current_target.
1801 Memory accesses check target->to_has_(all_)memory, and the
1802 flattened target doesn't inherit those. */
1803 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1804 myaddr, memaddr, len) == len)
1805 return 0;
1806 else
1807 return EIO;
1808 }
1809
1810 /* Write LEN bytes from MYADDR to target raw memory at address
1811 MEMADDR. Returns either 0 for success or an errno value if any
1812 error occurs. If an error occurs, no guarantee is made about how
1813 much data got written. Callers that can deal with partial writes
1814 should call target_write. */
1815
1816 int
1817 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1818 {
1819 /* Dispatch to the topmost target, not the flattened current_target.
1820 Memory accesses check target->to_has_(all_)memory, and the
1821 flattened target doesn't inherit those. */
1822 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1823 myaddr, memaddr, len) == len)
1824 return 0;
1825 else
1826 return EIO;
1827 }
1828
1829 /* Fetch the target's memory map. */
1830
1831 VEC(mem_region_s) *
1832 target_memory_map (void)
1833 {
1834 VEC(mem_region_s) *result;
1835 struct mem_region *last_one, *this_one;
1836 int ix;
1837 struct target_ops *t;
1838
1839 if (targetdebug)
1840 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1841
1842 for (t = current_target.beneath; t != NULL; t = t->beneath)
1843 if (t->to_memory_map != NULL)
1844 break;
1845
1846 if (t == NULL)
1847 return NULL;
1848
1849 result = t->to_memory_map (t);
1850 if (result == NULL)
1851 return NULL;
1852
1853 qsort (VEC_address (mem_region_s, result),
1854 VEC_length (mem_region_s, result),
1855 sizeof (struct mem_region), mem_region_cmp);
1856
1857 /* Check that regions do not overlap. Simultaneously assign
1858 a numbering for the "mem" commands to use to refer to
1859 each region. */
1860 last_one = NULL;
1861 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1862 {
1863 this_one->number = ix;
1864
1865 if (last_one && last_one->hi > this_one->lo)
1866 {
1867 warning (_("Overlapping regions in memory map: ignoring"));
1868 VEC_free (mem_region_s, result);
1869 return NULL;
1870 }
1871 last_one = this_one;
1872 }
1873
1874 return result;
1875 }
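/* Illustrative usage sketch (editor's example, not part of this file):
   walk the regions returned by target_memory_map and release the vector
   when done.

     VEC(mem_region_s) *map = target_memory_map ();
     struct mem_region *r;
     int ix;

     if (map != NULL)
       {
	 for (ix = 0; VEC_iterate (mem_region_s, map, ix, r); ix++)
	   printf_filtered ("%s..%s\n",
			    hex_string (r->lo), hex_string (r->hi));
	 VEC_free (mem_region_s, map);
       }
*/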
1876
1877 void
1878 target_flash_erase (ULONGEST address, LONGEST length)
1879 {
1880 struct target_ops *t;
1881
1882 for (t = current_target.beneath; t != NULL; t = t->beneath)
1883 if (t->to_flash_erase != NULL)
1884 {
1885 if (targetdebug)
1886 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1887 hex_string (address), phex (length, 0));
1888 t->to_flash_erase (t, address, length);
1889 return;
1890 }
1891
1892 tcomplain ();
1893 }
1894
1895 void
1896 target_flash_done (void)
1897 {
1898 struct target_ops *t;
1899
1900 for (t = current_target.beneath; t != NULL; t = t->beneath)
1901 if (t->to_flash_done != NULL)
1902 {
1903 if (targetdebug)
1904 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1905 t->to_flash_done (t);
1906 return;
1907 }
1908
1909 tcomplain ();
1910 }
1911
1912 static void
1913 show_trust_readonly (struct ui_file *file, int from_tty,
1914 struct cmd_list_element *c, const char *value)
1915 {
1916 fprintf_filtered (file,
1917 _("Mode for reading from readonly sections is %s.\n"),
1918 value);
1919 }
1920
1921 /* More generic transfers. */
1922
1923 static LONGEST
1924 default_xfer_partial (struct target_ops *ops, enum target_object object,
1925 const char *annex, gdb_byte *readbuf,
1926 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1927 {
1928 if (object == TARGET_OBJECT_MEMORY
1929 && ops->deprecated_xfer_memory != NULL)
1930 /* If available, fall back to the target's
1931 "deprecated_xfer_memory" method. */
1932 {
1933 int xfered = -1;
1934
1935 errno = 0;
1936 if (writebuf != NULL)
1937 {
1938 void *buffer = xmalloc (len);
1939 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1940
1941 memcpy (buffer, writebuf, len);
1942 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1943 1/*write*/, NULL, ops);
1944 do_cleanups (cleanup);
1945 }
1946 if (readbuf != NULL)
1947 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1948 0/*read*/, NULL, ops);
1949 if (xfered > 0)
1950 return xfered;
1951 else if (xfered == 0 && errno == 0)
1952 /* "deprecated_xfer_memory" uses 0, cross checked against
1953 ERRNO as one indication of an error. */
1954 return 0;
1955 else
1956 return -1;
1957 }
1958 else if (ops->beneath != NULL)
1959 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1960 readbuf, writebuf, offset, len);
1961 else
1962 return -1;
1963 }
1964
1965 /* The xfer_partial handler for the topmost target. Unlike the default,
1966 it does not need to handle memory specially; it just passes all
1967 requests down the stack. */
1968
1969 static LONGEST
1970 current_xfer_partial (struct target_ops *ops, enum target_object object,
1971 const char *annex, gdb_byte *readbuf,
1972 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1973 {
1974 if (ops->beneath != NULL)
1975 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1976 readbuf, writebuf, offset, len);
1977 else
1978 return -1;
1979 }
1980
1981 /* Target vector read/write partial wrapper functions. */
1982
1983 static LONGEST
1984 target_read_partial (struct target_ops *ops,
1985 enum target_object object,
1986 const char *annex, gdb_byte *buf,
1987 ULONGEST offset, LONGEST len)
1988 {
1989 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
1990 }
1991
1992 static LONGEST
1993 target_write_partial (struct target_ops *ops,
1994 enum target_object object,
1995 const char *annex, const gdb_byte *buf,
1996 ULONGEST offset, LONGEST len)
1997 {
1998 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
1999 }
2000
2001 /* Wrappers to perform the full transfer. */
2002
2003 /* For docs on target_read see target.h. */
2004
2005 LONGEST
2006 target_read (struct target_ops *ops,
2007 enum target_object object,
2008 const char *annex, gdb_byte *buf,
2009 ULONGEST offset, LONGEST len)
2010 {
2011 LONGEST xfered = 0;
2012
2013 while (xfered < len)
2014 {
2015 LONGEST xfer = target_read_partial (ops, object, annex,
2016 (gdb_byte *) buf + xfered,
2017 offset + xfered, len - xfered);
2018
2019 /* Call an observer, notifying them of the xfer progress? */
2020 if (xfer == 0)
2021 return xfered;
2022 if (xfer < 0)
2023 return -1;
2024 xfered += xfer;
2025 QUIT;
2026 }
2027 return len;
2028 }
2029
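/* For instance (sketch only; BUF, MEMADDR and LEN are placeholders),
   a full-length read of raw memory through this generic interface
   treats a short transfer as an error:

     if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY,
                      NULL, buf, memaddr, len) != len)
       memory_error (EIO, memaddr);
*/
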
2030 /* Assuming that the entire [begin, end) range of memory cannot be
2031 read, try to read whatever subrange is possible to read.
2032
2033 The function returns, in RESULT, either zero or one memory block.
2034 If there's a readable subrange at the beginning, it is completely
2035 read and returned. Any further readable subrange will not be read.
2036 Otherwise, if there's a readable subrange at the end, it will be
2037 completely read and returned. Any readable subranges before it
2038 (obviously, not starting at the beginning) will be ignored. In
2039 other cases -- either no readable subrange, or readable subrange(s)
2040 that are neither at the beginning nor the end -- nothing is returned.
2041
2042 The purpose of this function is to handle a read across a boundary
2043 of accessible memory in the case when a memory map is not available.
2044 The above restrictions are fine for this case, but will give
2045 incorrect results if the memory is 'patchy'. However, supporting
2046 'patchy' memory would require trying to read every single byte,
2047 which seems an unacceptable solution. An explicit memory map is
2048 recommended for this case -- read_memory_robust will then take
2049 care of reading multiple ranges. */
2050
2051 static void
2052 read_whatever_is_readable (struct target_ops *ops,
2053 ULONGEST begin, ULONGEST end,
2054 VEC(memory_read_result_s) **result)
2055 {
2056 gdb_byte *buf = xmalloc (end - begin);
2057 ULONGEST current_begin = begin;
2058 ULONGEST current_end = end;
2059 int forward;
2060 memory_read_result_s r;
2061
2062 /* If we previously failed to read 1 byte, nothing can be done here. */
2063 if (end - begin <= 1)
2064 {
2065 xfree (buf);
2066 return;
2067 }
2068
2069 /* Check that either the first or the last byte is readable, and give up
2070 if not. This heuristic is meant to permit reading accessible memory
2071 at the boundary of an accessible region. */
2072 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2073 buf, begin, 1) == 1)
2074 {
2075 forward = 1;
2076 ++current_begin;
2077 }
2078 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2079 buf + (end-begin) - 1, end - 1, 1) == 1)
2080 {
2081 forward = 0;
2082 --current_end;
2083 }
2084 else
2085 {
2086 xfree (buf);
2087 return;
2088 }
2089
2090 /* The loop invariant is that the range [current_begin, current_end) was
2091 previously found to be not readable as a whole.
2092
2093 Note the loop condition -- if the range has only 1 byte, we can't divide
2094 it further, so there's no point in trying. */
2095 while (current_end - current_begin > 1)
2096 {
2097 ULONGEST first_half_begin, first_half_end;
2098 ULONGEST second_half_begin, second_half_end;
2099 LONGEST xfer;
2100 ULONGEST middle = current_begin + (current_end - current_begin)/2;
2101
2102 if (forward)
2103 {
2104 first_half_begin = current_begin;
2105 first_half_end = middle;
2106 second_half_begin = middle;
2107 second_half_end = current_end;
2108 }
2109 else
2110 {
2111 first_half_begin = middle;
2112 first_half_end = current_end;
2113 second_half_begin = current_begin;
2114 second_half_end = middle;
2115 }
2116
2117 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2118 buf + (first_half_begin - begin),
2119 first_half_begin,
2120 first_half_end - first_half_begin);
2121
2122 if (xfer == first_half_end - first_half_begin)
2123 {
2124 /* This half reads fine, so the error must be in the
2125 other half. */
2126 current_begin = second_half_begin;
2127 current_end = second_half_end;
2128 }
2129 else
2130 {
2131 /* This half is not readable. Because we've tried one byte, we
2132 know some part of this half is actually readable. Go to the next
2133 iteration to divide again and try to read.
2134
2135 We don't handle the other half, because this function only tries
2136 to read a single readable subrange. */
2137 current_begin = first_half_begin;
2138 current_end = first_half_end;
2139 }
2140 }
2141
2142 if (forward)
2143 {
2144 /* The [begin, current_begin) range has been read. */
2145 r.begin = begin;
2146 r.end = current_begin;
2147 r.data = buf;
2148 }
2149 else
2150 {
2151 /* The [current_end, end) range has been read. */
2152 LONGEST rlen = end - current_end;
2153
2154 r.data = xmalloc (rlen);
2155 memcpy (r.data, buf + current_end - begin, rlen);
2156 r.begin = current_end;
2157 r.end = end;
2158 xfree (buf);
2159 }
2160 VEC_safe_push (memory_read_result_s, *result, &r);
2161 }
2162
2163 void
2164 free_memory_read_result_vector (void *x)
2165 {
2166 VEC(memory_read_result_s) *v = x;
2167 memory_read_result_s *current;
2168 int ix;
2169
2170 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2171 {
2172 xfree (current->data);
2173 }
2174 VEC_free (memory_read_result_s, v);
2175 }
2176
2177 VEC(memory_read_result_s) *
2178 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2179 {
2180 VEC(memory_read_result_s) *result = 0;
2181
2182 LONGEST xfered = 0;
2183 while (xfered < len)
2184 {
2185 struct mem_region *region = lookup_mem_region (offset + xfered);
2186 LONGEST rlen;
2187
2188 /* If there is no explicit region, a fake one should be created. */
2189 gdb_assert (region);
2190
2191 if (region->hi == 0)
2192 rlen = len - xfered;
2193 else
2194 rlen = region->hi - offset;
2195
2196 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2197 {
2198 /* Cannot read this region. Note that we can end up here only
2199 if the region is explicitly marked inaccessible, or
2200 'inaccessible-by-default' is in effect. */
2201 xfered += rlen;
2202 }
2203 else
2204 {
2205 LONGEST to_read = min (len - xfered, rlen);
2206 gdb_byte *buffer = (gdb_byte *) xmalloc (to_read);
2207
2208 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2209 (gdb_byte *) buffer,
2210 offset + xfered, to_read);
2211 /* Call an observer, notifying them of the xfer progress? */
2212 if (xfer <= 0)
2213 {
2214 /* Got an error reading full chunk. See if maybe we can read
2215 some subrange. */
2216 xfree (buffer);
2217 read_whatever_is_readable (ops, offset + xfered,
2218 offset + xfered + to_read, &result);
2219 xfered += to_read;
2220 }
2221 else
2222 {
2223 struct memory_read_result r;
2224 r.data = buffer;
2225 r.begin = offset + xfered;
2226 r.end = r.begin + xfer;
2227 VEC_safe_push (memory_read_result_s, result, &r);
2228 xfered += xfer;
2229 }
2230 QUIT;
2231 }
2232 }
2233 return result;
2234 }
2235
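/* A caller of read_memory_robust might look like the sketch below
   (illustrative only; OPS, OFFSET and LEN stand for whatever the
   caller has at hand, and the printf is just a stand-in for real
   processing of each chunk).

     VEC(memory_read_result_s) *chunks
       = read_memory_robust (ops, offset, len);
     struct cleanup *old
       = make_cleanup (free_memory_read_result_vector, chunks);
     memory_read_result_s *m;
     int i;

     for (i = 0; VEC_iterate (memory_read_result_s, chunks, i, m); i++)
       printf_filtered ("read [%s, %s)\n",
                        hex_string (m->begin), hex_string (m->end));

     do_cleanups (old);
*/
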
2236
2237 /* An alternative to target_write with progress callbacks. */
2238
2239 LONGEST
2240 target_write_with_progress (struct target_ops *ops,
2241 enum target_object object,
2242 const char *annex, const gdb_byte *buf,
2243 ULONGEST offset, LONGEST len,
2244 void (*progress) (ULONGEST, void *), void *baton)
2245 {
2246 LONGEST xfered = 0;
2247
2248 /* Give the progress callback a chance to set up. */
2249 if (progress)
2250 (*progress) (0, baton);
2251
2252 while (xfered < len)
2253 {
2254 LONGEST xfer = target_write_partial (ops, object, annex,
2255 (gdb_byte *) buf + xfered,
2256 offset + xfered, len - xfered);
2257
2258 if (xfer == 0)
2259 return xfered;
2260 if (xfer < 0)
2261 return -1;
2262
2263 if (progress)
2264 (*progress) (xfer, baton);
2265
2266 xfered += xfer;
2267 QUIT;
2268 }
2269 return len;
2270 }
2271
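/* A minimal progress callback suitable for the PROGRESS argument
   above could look like this (a sketch; nothing in this file installs
   it):

     static void
     show_write_progress (ULONGEST written, void *baton)
     {
       ULONGEST *total = baton;

       *total += written;
       printf_filtered ("wrote %s bytes so far\n", pulongest (*total));
     }

   The caller would pass a pointer to a ULONGEST counter as BATON.
   Note that the first invocation is made with WRITTEN == 0 before any
   data is transferred, which gives the callback a chance to set up. */
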
2272 /* For docs on target_write see target.h. */
2273
2274 LONGEST
2275 target_write (struct target_ops *ops,
2276 enum target_object object,
2277 const char *annex, const gdb_byte *buf,
2278 ULONGEST offset, LONGEST len)
2279 {
2280 return target_write_with_progress (ops, object, annex, buf, offset, len,
2281 NULL, NULL);
2282 }
2283
2284 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2285 the size of the transferred data. PADDING additional bytes are
2286 available in *BUF_P. This is a helper function for
2287 target_read_alloc; see the declaration of that function for more
2288 information. */
2289
2290 static LONGEST
2291 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2292 const char *annex, gdb_byte **buf_p, int padding)
2293 {
2294 size_t buf_alloc, buf_pos;
2295 gdb_byte *buf;
2296 LONGEST n;
2297
2298 /* This function does not have a length parameter; it reads the
2299 entire OBJECT. Also, it doesn't support objects fetched partly
2300 from one target and partly from another (in a different stratum,
2301 e.g. a core file and an executable). Both reasons make it
2302 unsuitable for reading memory. */
2303 gdb_assert (object != TARGET_OBJECT_MEMORY);
2304
2305 /* Start by reading up to 4K at a time. The target will throttle
2306 this number down if necessary. */
2307 buf_alloc = 4096;
2308 buf = xmalloc (buf_alloc);
2309 buf_pos = 0;
2310 while (1)
2311 {
2312 n = target_read_partial (ops, object, annex, &buf[buf_pos],
2313 buf_pos, buf_alloc - buf_pos - padding);
2314 if (n < 0)
2315 {
2316 /* An error occurred. */
2317 xfree (buf);
2318 return -1;
2319 }
2320 else if (n == 0)
2321 {
2322 /* Read all there was. */
2323 if (buf_pos == 0)
2324 xfree (buf);
2325 else
2326 *buf_p = buf;
2327 return buf_pos;
2328 }
2329
2330 buf_pos += n;
2331
2332 /* If the buffer is filling up, expand it. */
2333 if (buf_alloc < buf_pos * 2)
2334 {
2335 buf_alloc *= 2;
2336 buf = xrealloc (buf, buf_alloc);
2337 }
2338
2339 QUIT;
2340 }
2341 }
2342
2343 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2344 the size of the transferred data. See the declaration in "target.h"
2345 for more information about the return value. */
2346
2347 LONGEST
2348 target_read_alloc (struct target_ops *ops, enum target_object object,
2349 const char *annex, gdb_byte **buf_p)
2350 {
2351 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2352 }
2353
2354 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2355 returned as a string, allocated using xmalloc. If an error occurs
2356 or the transfer is unsupported, NULL is returned. Empty objects
2357 are returned as allocated but empty strings. A warning is issued
2358 if the result contains any embedded NUL bytes. */
2359
2360 char *
2361 target_read_stralloc (struct target_ops *ops, enum target_object object,
2362 const char *annex)
2363 {
2364 gdb_byte *buffer;
2365 LONGEST i, transferred;
2366
2367 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2368
2369 if (transferred < 0)
2370 return NULL;
2371
2372 if (transferred == 0)
2373 return xstrdup ("");
2374
2375 buffer[transferred] = 0;
2376
2377 /* Check for embedded NUL bytes; but allow trailing NULs. */
2378 for (i = strlen (buffer); i < transferred; i++)
2379 if (buffer[i] != 0)
2380 {
2381 warning (_("target object %d, annex %s, "
2382 "contained unexpected null characters"),
2383 (int) object, annex ? annex : "(none)");
2384 break;
2385 }
2386
2387 return (char *) buffer;
2388 }
2389
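/* For example (a sketch, mirroring what target_get_osdata further
   below does), reading the target's process list as an XML string and
   releasing it afterwards:

     char *xml = target_read_stralloc (current_target.beneath,
                                       TARGET_OBJECT_OSDATA, "processes");

     if (xml != NULL)
       {
         puts_filtered (xml);
         xfree (xml);
       }
*/
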
2390 /* Memory transfer methods. */
2391
2392 void
2393 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2394 LONGEST len)
2395 {
2396 /* This method is used to read from an alternate, non-current
2397 target. This read must bypass the overlay support (as symbols
2398 don't match this target), and GDB's internal cache (wrong cache
2399 for this target). */
2400 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2401 != len)
2402 memory_error (EIO, addr);
2403 }
2404
2405 ULONGEST
2406 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2407 int len, enum bfd_endian byte_order)
2408 {
2409 gdb_byte buf[sizeof (ULONGEST)];
2410
2411 gdb_assert (len <= sizeof (buf));
2412 get_target_memory (ops, addr, buf, len);
2413 return extract_unsigned_integer (buf, len, byte_order);
2414 }
2415
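/* As an illustration (not used here), fetching a pointer-sized
   unsigned value from an alternate target OPS at ADDR:

     enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
     int ptr_len = gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT;
     ULONGEST value
       = get_target_memory_unsigned (ops, addr, ptr_len, byte_order);

   Any error in the underlying read is reported through memory_error,
   as get_target_memory above shows. */
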
2416 int
2417 target_insert_breakpoint (struct gdbarch *gdbarch,
2418 struct bp_target_info *bp_tgt)
2419 {
2420 if (!may_insert_breakpoints)
2421 {
2422 warning (_("May not insert breakpoints"));
2423 return 1;
2424 }
2425
2426 return (*current_target.to_insert_breakpoint) (gdbarch, bp_tgt);
2427 }
2428
2429 int
2430 target_remove_breakpoint (struct gdbarch *gdbarch,
2431 struct bp_target_info *bp_tgt)
2432 {
2433 /* This is kind of a weird case to handle, but the permission might
2434 have been changed after breakpoints were inserted - in which case
2435 we should just take the user literally and assume that any
2436 breakpoints should be left in place. */
2437 if (!may_insert_breakpoints)
2438 {
2439 warning (_("May not remove breakpoints"));
2440 return 1;
2441 }
2442
2443 return (*current_target.to_remove_breakpoint) (gdbarch, bp_tgt);
2444 }
2445
2446 static void
2447 target_info (char *args, int from_tty)
2448 {
2449 struct target_ops *t;
2450 int has_all_mem = 0;
2451
2452 if (symfile_objfile != NULL)
2453 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);
2454
2455 for (t = target_stack; t != NULL; t = t->beneath)
2456 {
2457 if (!(*t->to_has_memory) (t))
2458 continue;
2459
2460 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2461 continue;
2462 if (has_all_mem)
2463 printf_unfiltered (_("\tWhile running this, "
2464 "GDB does not access memory from...\n"));
2465 printf_unfiltered ("%s:\n", t->to_longname);
2466 (t->to_files_info) (t);
2467 has_all_mem = (*t->to_has_all_memory) (t);
2468 }
2469 }
2470
2471 /* This function is called before any new inferior is created, e.g.
2472 by running a program, attaching, or connecting to a target.
2473 It cleans up any state from previous invocations which might
2474 change between runs. This is a subset of what target_preopen
2475 resets (things which might change between targets). */
2476
2477 void
2478 target_pre_inferior (int from_tty)
2479 {
2480 /* Clear out solib state. Otherwise the solib state of the previous
2481 inferior might have survived and is entirely wrong for the new
2482 target. This has been observed on GNU/Linux using glibc 2.3. How
2483 to reproduce:
2484
2485 bash$ ./foo&
2486 [1] 4711
2487 bash$ ./foo&
2488 [1] 4712
2489 bash$ gdb ./foo
2490 [...]
2491 (gdb) attach 4711
2492 (gdb) detach
2493 (gdb) attach 4712
2494 Cannot access memory at address 0xdeadbeef
2495 */
2496
2497 /* In some OSs, the shared library list is the same/global/shared
2498 across inferiors. If code is shared between processes, so are
2499 memory regions and features. */
2500 if (!gdbarch_has_global_solist (target_gdbarch ()))
2501 {
2502 no_shared_libraries (NULL, from_tty);
2503
2504 invalidate_target_mem_regions ();
2505
2506 target_clear_description ();
2507 }
2508
2509 agent_capability_invalidate ();
2510 }
2511
2512 /* Callback for iterate_over_inferiors. Gets rid of the given
2513 inferior. */
2514
2515 static int
2516 dispose_inferior (struct inferior *inf, void *args)
2517 {
2518 struct thread_info *thread;
2519
2520 thread = any_thread_of_process (inf->pid);
2521 if (thread)
2522 {
2523 switch_to_thread (thread->ptid);
2524
2525 /* Core inferiors actually should be detached, not killed. */
2526 if (target_has_execution)
2527 target_kill ();
2528 else
2529 target_detach (NULL, 0);
2530 }
2531
2532 return 0;
2533 }
2534
2535 /* This is to be called by the open routine before it does
2536 anything. */
2537
2538 void
2539 target_preopen (int from_tty)
2540 {
2541 dont_repeat ();
2542
2543 if (have_inferiors ())
2544 {
2545 if (!from_tty
2546 || !have_live_inferiors ()
2547 || query (_("A program is being debugged already. Kill it? ")))
2548 iterate_over_inferiors (dispose_inferior, NULL);
2549 else
2550 error (_("Program not killed."));
2551 }
2552
2553 /* Calling target_kill may remove the target from the stack. But if
2554 it doesn't (which seems like a win for UDI), remove it now. */
2555 /* Leave the exec target, though. The user may be switching from a
2556 live process to a core of the same program. */
2557 pop_all_targets_above (file_stratum, 0);
2558
2559 target_pre_inferior (from_tty);
2560 }
2561
2562 /* Detach a target after doing deferred register stores. */
2563
2564 void
2565 target_detach (char *args, int from_tty)
2566 {
2567 struct target_ops* t;
2568
2569 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2570 /* Don't remove global breakpoints here. They're removed on
2571 disconnection from the target. */
2572 ;
2573 else
2574 /* If we're in breakpoints-always-inserted mode, have to remove
2575 them before detaching. */
2576 remove_breakpoints_pid (PIDGET (inferior_ptid));
2577
2578 prepare_for_detach ();
2579
2580 for (t = current_target.beneath; t != NULL; t = t->beneath)
2581 {
2582 if (t->to_detach != NULL)
2583 {
2584 t->to_detach (t, args, from_tty);
2585 if (targetdebug)
2586 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2587 args, from_tty);
2588 return;
2589 }
2590 }
2591
2592 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2593 }
2594
2595 void
2596 target_disconnect (char *args, int from_tty)
2597 {
2598 struct target_ops *t;
2599
2600 /* If we're in breakpoints-always-inserted mode or if breakpoints
2601 are global across processes, we have to remove them before
2602 disconnecting. */
2603 remove_breakpoints ();
2604
2605 for (t = current_target.beneath; t != NULL; t = t->beneath)
2606 if (t->to_disconnect != NULL)
2607 {
2608 if (targetdebug)
2609 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2610 args, from_tty);
2611 t->to_disconnect (t, args, from_tty);
2612 return;
2613 }
2614
2615 tcomplain ();
2616 }
2617
2618 ptid_t
2619 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2620 {
2621 struct target_ops *t;
2622
2623 for (t = current_target.beneath; t != NULL; t = t->beneath)
2624 {
2625 if (t->to_wait != NULL)
2626 {
2627 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2628
2629 if (targetdebug)
2630 {
2631 char *status_string;
2632 char *options_string;
2633
2634 status_string = target_waitstatus_to_string (status);
2635 options_string = target_options_to_string (options);
2636 fprintf_unfiltered (gdb_stdlog,
2637 "target_wait (%d, status, options={%s})"
2638 " = %d, %s\n",
2639 PIDGET (ptid), options_string,
2640 PIDGET (retval), status_string);
2641 xfree (status_string);
2642 xfree (options_string);
2643 }
2644
2645 return retval;
2646 }
2647 }
2648
2649 noprocess ();
2650 }
2651
2652 char *
2653 target_pid_to_str (ptid_t ptid)
2654 {
2655 struct target_ops *t;
2656
2657 for (t = current_target.beneath; t != NULL; t = t->beneath)
2658 {
2659 if (t->to_pid_to_str != NULL)
2660 return (*t->to_pid_to_str) (t, ptid);
2661 }
2662
2663 return normal_pid_to_str (ptid);
2664 }
2665
2666 char *
2667 target_thread_name (struct thread_info *info)
2668 {
2669 struct target_ops *t;
2670
2671 for (t = current_target.beneath; t != NULL; t = t->beneath)
2672 {
2673 if (t->to_thread_name != NULL)
2674 return (*t->to_thread_name) (info);
2675 }
2676
2677 return NULL;
2678 }
2679
2680 void
2681 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2682 {
2683 struct target_ops *t;
2684
2685 target_dcache_invalidate ();
2686
2687 for (t = current_target.beneath; t != NULL; t = t->beneath)
2688 {
2689 if (t->to_resume != NULL)
2690 {
2691 t->to_resume (t, ptid, step, signal);
2692 if (targetdebug)
2693 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2694 PIDGET (ptid),
2695 step ? "step" : "continue",
2696 gdb_signal_to_name (signal));
2697
2698 registers_changed_ptid (ptid);
2699 set_executing (ptid, 1);
2700 set_running (ptid, 1);
2701 clear_inline_frame_state (ptid);
2702 return;
2703 }
2704 }
2705
2706 noprocess ();
2707 }
2708
2709 void
2710 target_pass_signals (int numsigs, unsigned char *pass_signals)
2711 {
2712 struct target_ops *t;
2713
2714 for (t = current_target.beneath; t != NULL; t = t->beneath)
2715 {
2716 if (t->to_pass_signals != NULL)
2717 {
2718 if (targetdebug)
2719 {
2720 int i;
2721
2722 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2723 numsigs);
2724
2725 for (i = 0; i < numsigs; i++)
2726 if (pass_signals[i])
2727 fprintf_unfiltered (gdb_stdlog, " %s",
2728 gdb_signal_to_name (i));
2729
2730 fprintf_unfiltered (gdb_stdlog, " })\n");
2731 }
2732
2733 (*t->to_pass_signals) (numsigs, pass_signals);
2734 return;
2735 }
2736 }
2737 }
2738
2739 void
2740 target_program_signals (int numsigs, unsigned char *program_signals)
2741 {
2742 struct target_ops *t;
2743
2744 for (t = current_target.beneath; t != NULL; t = t->beneath)
2745 {
2746 if (t->to_program_signals != NULL)
2747 {
2748 if (targetdebug)
2749 {
2750 int i;
2751
2752 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2753 numsigs);
2754
2755 for (i = 0; i < numsigs; i++)
2756 if (program_signals[i])
2757 fprintf_unfiltered (gdb_stdlog, " %s",
2758 gdb_signal_to_name (i));
2759
2760 fprintf_unfiltered (gdb_stdlog, " })\n");
2761 }
2762
2763 (*t->to_program_signals) (numsigs, program_signals);
2764 return;
2765 }
2766 }
2767 }
2768
2769 /* Look through the list of possible targets for a target that can
2770 follow forks. */
2771
2772 int
2773 target_follow_fork (int follow_child)
2774 {
2775 struct target_ops *t;
2776
2777 for (t = current_target.beneath; t != NULL; t = t->beneath)
2778 {
2779 if (t->to_follow_fork != NULL)
2780 {
2781 int retval = t->to_follow_fork (t, follow_child);
2782
2783 if (targetdebug)
2784 fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
2785 follow_child, retval);
2786 return retval;
2787 }
2788 }
2789
2790 /* Some target returned a fork event, but did not know how to follow it. */
2791 internal_error (__FILE__, __LINE__,
2792 _("could not find a target to follow fork"));
2793 }
2794
2795 void
2796 target_mourn_inferior (void)
2797 {
2798 struct target_ops *t;
2799
2800 for (t = current_target.beneath; t != NULL; t = t->beneath)
2801 {
2802 if (t->to_mourn_inferior != NULL)
2803 {
2804 t->to_mourn_inferior (t);
2805 if (targetdebug)
2806 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2807
2808 /* We no longer need to keep handles on any of the object files.
2809 Make sure to release them to avoid unnecessarily locking any
2810 of them while we're not actually debugging. */
2811 bfd_cache_close_all ();
2812
2813 return;
2814 }
2815 }
2816
2817 internal_error (__FILE__, __LINE__,
2818 _("could not find a target to follow mourn inferior"));
2819 }
2820
2821 /* Look for a target which can describe architectural features, starting
2822 from TARGET. If we find one, return its description. */
2823
2824 const struct target_desc *
2825 target_read_description (struct target_ops *target)
2826 {
2827 struct target_ops *t;
2828
2829 for (t = target; t != NULL; t = t->beneath)
2830 if (t->to_read_description != NULL)
2831 {
2832 const struct target_desc *tdesc;
2833
2834 tdesc = t->to_read_description (t);
2835 if (tdesc)
2836 return tdesc;
2837 }
2838
2839 return NULL;
2840 }
2841
2842 /* The default implementation of to_search_memory.
2843 This implements a basic search of memory, reading target memory and
2844 performing the search here (as opposed to performing the search on the
2845 target side with, for example, gdbserver). */
2846
2847 int
2848 simple_search_memory (struct target_ops *ops,
2849 CORE_ADDR start_addr, ULONGEST search_space_len,
2850 const gdb_byte *pattern, ULONGEST pattern_len,
2851 CORE_ADDR *found_addrp)
2852 {
2853 /* NOTE: also defined in find.c testcase. */
2854 #define SEARCH_CHUNK_SIZE 16000
2855 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2856 /* Buffer to hold memory contents for searching. */
2857 gdb_byte *search_buf;
2858 unsigned search_buf_size;
2859 struct cleanup *old_cleanups;
2860
2861 search_buf_size = chunk_size + pattern_len - 1;
2862
2863 /* No point in trying to allocate a buffer larger than the search space. */
2864 if (search_space_len < search_buf_size)
2865 search_buf_size = search_space_len;
2866
2867 search_buf = malloc (search_buf_size);
2868 if (search_buf == NULL)
2869 error (_("Unable to allocate memory to perform the search."));
2870 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2871
2872 /* Prime the search buffer. */
2873
2874 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2875 search_buf, start_addr, search_buf_size) != search_buf_size)
2876 {
2877 warning (_("Unable to access %s bytes of target "
2878 "memory at %s, halting search."),
2879 pulongest (search_buf_size), hex_string (start_addr));
2880 do_cleanups (old_cleanups);
2881 return -1;
2882 }
2883
2884 /* Perform the search.
2885
2886 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2887 When we've scanned N bytes we copy the trailing bytes to the start and
2888 read in another N bytes. */
2889
2890 while (search_space_len >= pattern_len)
2891 {
2892 gdb_byte *found_ptr;
2893 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2894
2895 found_ptr = memmem (search_buf, nr_search_bytes,
2896 pattern, pattern_len);
2897
2898 if (found_ptr != NULL)
2899 {
2900 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2901
2902 *found_addrp = found_addr;
2903 do_cleanups (old_cleanups);
2904 return 1;
2905 }
2906
2907 /* Not found in this chunk, skip to next chunk. */
2908
2909 /* Don't let search_space_len wrap here, it's unsigned. */
2910 if (search_space_len >= chunk_size)
2911 search_space_len -= chunk_size;
2912 else
2913 search_space_len = 0;
2914
2915 if (search_space_len >= pattern_len)
2916 {
2917 unsigned keep_len = search_buf_size - chunk_size;
2918 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2919 int nr_to_read;
2920
2921 /* Copy the trailing part of the previous iteration to the front
2922 of the buffer for the next iteration. */
2923 gdb_assert (keep_len == pattern_len - 1);
2924 memcpy (search_buf, search_buf + chunk_size, keep_len);
2925
2926 nr_to_read = min (search_space_len - keep_len, chunk_size);
2927
2928 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2929 search_buf + keep_len, read_addr,
2930 nr_to_read) != nr_to_read)
2931 {
2932 warning (_("Unable to access %s bytes of target "
2933 "memory at %s, halting search."),
2934 plongest (nr_to_read),
2935 hex_string (read_addr));
2936 do_cleanups (old_cleanups);
2937 return -1;
2938 }
2939
2940 start_addr += chunk_size;
2941 }
2942 }
2943
2944 /* Not found. */
2945
2946 do_cleanups (old_cleanups);
2947 return 0;
2948 }
2949
2950 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2951 sequence of bytes in PATTERN with length PATTERN_LEN.
2952
2953 The result is 1 if found, 0 if not found, and -1 if there was an error
2954 requiring halting of the search (e.g. memory read error).
2955 If the pattern is found the address is recorded in FOUND_ADDRP. */
2956
2957 int
2958 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2959 const gdb_byte *pattern, ULONGEST pattern_len,
2960 CORE_ADDR *found_addrp)
2961 {
2962 struct target_ops *t;
2963 int found;
2964
2965 /* We don't use INHERIT to set current_target.to_search_memory,
2966 so we have to scan the target stack and handle targetdebug
2967 ourselves. */
2968
2969 if (targetdebug)
2970 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2971 hex_string (start_addr));
2972
2973 for (t = current_target.beneath; t != NULL; t = t->beneath)
2974 if (t->to_search_memory != NULL)
2975 break;
2976
2977 if (t != NULL)
2978 {
2979 found = t->to_search_memory (t, start_addr, search_space_len,
2980 pattern, pattern_len, found_addrp);
2981 }
2982 else
2983 {
2984 /* If a special version of to_search_memory isn't available, use the
2985 simple version. */
2986 found = simple_search_memory (current_target.beneath,
2987 start_addr, search_space_len,
2988 pattern, pattern_len, found_addrp);
2989 }
2990
2991 if (targetdebug)
2992 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2993
2994 return found;
2995 }
2996
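/* A caller (sketch only; PATTERN, PATTERN_LEN, START_ADDR and
   SEARCH_LEN are placeholders) would typically do:

     CORE_ADDR found_addr;
     int res = target_search_memory (start_addr, search_len,
                                     pattern, pattern_len, &found_addr);

     if (res == 1)
       printf_filtered ("pattern found at %s\n", hex_string (found_addr));
     else if (res == 0)
       printf_filtered ("pattern not found\n");
     else
       error (_("error while searching memory"));
*/
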
2997 /* Look through the currently pushed targets. If none of them will
2998 be able to restart the currently running process, issue an error
2999 message. */
3000
3001 void
3002 target_require_runnable (void)
3003 {
3004 struct target_ops *t;
3005
3006 for (t = target_stack; t != NULL; t = t->beneath)
3007 {
3008 /* If this target knows how to create a new program, then
3009 assume we will still be able to after killing the current
3010 one. Either killing and mourning will not pop T, or else
3011 find_default_run_target will find it again. */
3012 if (t->to_create_inferior != NULL)
3013 return;
3014
3015 /* Do not worry about thread_stratum targets that can not
3016 create inferiors. Assume they will be pushed again if
3017 necessary, and continue to the process_stratum. */
3018 if (t->to_stratum == thread_stratum
3019 || t->to_stratum == arch_stratum)
3020 continue;
3021
3022 error (_("The \"%s\" target does not support \"run\". "
3023 "Try \"help target\" or \"continue\"."),
3024 t->to_shortname);
3025 }
3026
3027 /* This function is only called if the target is running. In that
3028 case there should have been a process_stratum target and it
3029 should either know how to create inferiors, or not... */
3030 internal_error (__FILE__, __LINE__, _("No targets found"));
3031 }
3032
3033 /* Look through the list of possible targets for a target that can
3034 execute a run or attach command without any other data. This is
3035 used to locate the default process stratum.
3036
3037 If DO_MESG is not NULL, the result is always valid (error() is
3038 called for errors); else, return NULL on error. */
3039
3040 static struct target_ops *
3041 find_default_run_target (char *do_mesg)
3042 {
3043 struct target_ops **t;
3044 struct target_ops *runable = NULL;
3045 int count;
3046
3047 count = 0;
3048
3049 for (t = target_structs; t < target_structs + target_struct_size;
3050 ++t)
3051 {
3052 if ((*t)->to_can_run && target_can_run (*t))
3053 {
3054 runable = *t;
3055 ++count;
3056 }
3057 }
3058
3059 if (count != 1)
3060 {
3061 if (do_mesg)
3062 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3063 else
3064 return NULL;
3065 }
3066
3067 return runable;
3068 }
3069
3070 void
3071 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3072 {
3073 struct target_ops *t;
3074
3075 t = find_default_run_target ("attach");
3076 (t->to_attach) (t, args, from_tty);
3077 return;
3078 }
3079
3080 void
3081 find_default_create_inferior (struct target_ops *ops,
3082 char *exec_file, char *allargs, char **env,
3083 int from_tty)
3084 {
3085 struct target_ops *t;
3086
3087 t = find_default_run_target ("run");
3088 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3089 return;
3090 }
3091
3092 static int
3093 find_default_can_async_p (void)
3094 {
3095 struct target_ops *t;
3096
3097 /* This may be called before the target is pushed on the stack;
3098 look for the default process stratum. If there's none, gdb isn't
3099 configured with a native debugger, and target remote isn't
3100 connected yet. */
3101 t = find_default_run_target (NULL);
3102 if (t && t->to_can_async_p)
3103 return (t->to_can_async_p) ();
3104 return 0;
3105 }
3106
3107 static int
3108 find_default_is_async_p (void)
3109 {
3110 struct target_ops *t;
3111
3112 /* This may be called before the target is pushed on the stack;
3113 look for the default process stratum. If there's none, gdb isn't
3114 configured with a native debugger, and target remote isn't
3115 connected yet. */
3116 t = find_default_run_target (NULL);
3117 if (t && t->to_is_async_p)
3118 return (t->to_is_async_p) ();
3119 return 0;
3120 }
3121
3122 static int
3123 find_default_supports_non_stop (void)
3124 {
3125 struct target_ops *t;
3126
3127 t = find_default_run_target (NULL);
3128 if (t && t->to_supports_non_stop)
3129 return (t->to_supports_non_stop) ();
3130 return 0;
3131 }
3132
3133 int
3134 target_supports_non_stop (void)
3135 {
3136 struct target_ops *t;
3137
3138 for (t = &current_target; t != NULL; t = t->beneath)
3139 if (t->to_supports_non_stop)
3140 return t->to_supports_non_stop ();
3141
3142 return 0;
3143 }
3144
3145 /* Implement the "info proc" command. */
3146
3147 void
3148 target_info_proc (char *args, enum info_proc_what what)
3149 {
3150 struct target_ops *t;
3151
3152 /* If we're already connected to something that can get us OS
3153 related data, use it. Otherwise, try using the native
3154 target. */
3155 if (current_target.to_stratum >= process_stratum)
3156 t = current_target.beneath;
3157 else
3158 t = find_default_run_target (NULL);
3159
3160 for (; t != NULL; t = t->beneath)
3161 {
3162 if (t->to_info_proc != NULL)
3163 {
3164 t->to_info_proc (t, args, what);
3165
3166 if (targetdebug)
3167 fprintf_unfiltered (gdb_stdlog,
3168 "target_info_proc (\"%s\", %d)\n", args, what);
3169
3170 return;
3171 }
3172 }
3173
3174 error (_("Not supported on this target."));
3175 }
3176
3177 static int
3178 find_default_supports_disable_randomization (void)
3179 {
3180 struct target_ops *t;
3181
3182 t = find_default_run_target (NULL);
3183 if (t && t->to_supports_disable_randomization)
3184 return (t->to_supports_disable_randomization) ();
3185 return 0;
3186 }
3187
3188 int
3189 target_supports_disable_randomization (void)
3190 {
3191 struct target_ops *t;
3192
3193 for (t = &current_target; t != NULL; t = t->beneath)
3194 if (t->to_supports_disable_randomization)
3195 return t->to_supports_disable_randomization ();
3196
3197 return 0;
3198 }
3199
3200 char *
3201 target_get_osdata (const char *type)
3202 {
3203 struct target_ops *t;
3204
3205 /* If we're already connected to something that can get us OS
3206 related data, use it. Otherwise, try using the native
3207 target. */
3208 if (current_target.to_stratum >= process_stratum)
3209 t = current_target.beneath;
3210 else
3211 t = find_default_run_target ("get OS data");
3212
3213 if (!t)
3214 return NULL;
3215
3216 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3217 }
3218
3219 /* Determine the current address space of thread PTID. */
3220
3221 struct address_space *
3222 target_thread_address_space (ptid_t ptid)
3223 {
3224 struct address_space *aspace;
3225 struct inferior *inf;
3226 struct target_ops *t;
3227
3228 for (t = current_target.beneath; t != NULL; t = t->beneath)
3229 {
3230 if (t->to_thread_address_space != NULL)
3231 {
3232 aspace = t->to_thread_address_space (t, ptid);
3233 gdb_assert (aspace);
3234
3235 if (targetdebug)
3236 fprintf_unfiltered (gdb_stdlog,
3237 "target_thread_address_space (%s) = %d\n",
3238 target_pid_to_str (ptid),
3239 address_space_num (aspace));
3240 return aspace;
3241 }
3242 }
3243
3244 /* Fall-back to the "main" address space of the inferior. */
3245 inf = find_inferior_pid (ptid_get_pid (ptid));
3246
3247 if (inf == NULL || inf->aspace == NULL)
3248 internal_error (__FILE__, __LINE__,
3249 _("Can't determine the current "
3250 "address space of thread %s\n"),
3251 target_pid_to_str (ptid));
3252
3253 return inf->aspace;
3254 }
3255
3256
3257 /* Target file operations. */
3258
3259 static struct target_ops *
3260 default_fileio_target (void)
3261 {
3262 /* If we're already connected to something that can perform
3263 file I/O, use it. Otherwise, try using the native target. */
3264 if (current_target.to_stratum >= process_stratum)
3265 return current_target.beneath;
3266 else
3267 return find_default_run_target ("file I/O");
3268 }
3269
3270 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3271 target file descriptor, or -1 if an error occurs (and set
3272 *TARGET_ERRNO). */
3273 int
3274 target_fileio_open (const char *filename, int flags, int mode,
3275 int *target_errno)
3276 {
3277 struct target_ops *t;
3278
3279 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3280 {
3281 if (t->to_fileio_open != NULL)
3282 {
3283 int fd = t->to_fileio_open (filename, flags, mode, target_errno);
3284
3285 if (targetdebug)
3286 fprintf_unfiltered (gdb_stdlog,
3287 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3288 filename, flags, mode,
3289 fd, fd != -1 ? 0 : *target_errno);
3290 return fd;
3291 }
3292 }
3293
3294 *target_errno = FILEIO_ENOSYS;
3295 return -1;
3296 }
3297
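/* For example (a sketch; the path is purely illustrative), opening a
   file read-only on the target and checking for errors:

     int target_errno;
     int fd = target_fileio_open ("/tmp/log", FILEIO_O_RDONLY, 0,
                                  &target_errno);

     if (fd == -1)
       error (_("could not open target file (fileio error %d)"),
              target_errno);
*/
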
3298 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3299 Return the number of bytes written, or -1 if an error occurs
3300 (and set *TARGET_ERRNO). */
3301 int
3302 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3303 ULONGEST offset, int *target_errno)
3304 {
3305 struct target_ops *t;
3306
3307 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3308 {
3309 if (t->to_fileio_pwrite != NULL)
3310 {
3311 int ret = t->to_fileio_pwrite (fd, write_buf, len, offset,
3312 target_errno);
3313
3314 if (targetdebug)
3315 fprintf_unfiltered (gdb_stdlog,
3316 "target_fileio_pwrite (%d,...,%d,%s) "
3317 "= %d (%d)\n",
3318 fd, len, pulongest (offset),
3319 ret, ret != -1 ? 0 : *target_errno);
3320 return ret;
3321 }
3322 }
3323
3324 *target_errno = FILEIO_ENOSYS;
3325 return -1;
3326 }
3327
3328 /* Read up to LEN bytes from FD on the target into READ_BUF.
3329 Return the number of bytes read, or -1 if an error occurs
3330 (and set *TARGET_ERRNO). */
3331 int
3332 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3333 ULONGEST offset, int *target_errno)
3334 {
3335 struct target_ops *t;
3336
3337 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3338 {
3339 if (t->to_fileio_pread != NULL)
3340 {
3341 int ret = t->to_fileio_pread (fd, read_buf, len, offset,
3342 target_errno);
3343
3344 if (targetdebug)
3345 fprintf_unfiltered (gdb_stdlog,
3346 "target_fileio_pread (%d,...,%d,%s) "
3347 "= %d (%d)\n",
3348 fd, len, pulongest (offset),
3349 ret, ret != -1 ? 0 : *target_errno);
3350 return ret;
3351 }
3352 }
3353
3354 *target_errno = FILEIO_ENOSYS;
3355 return -1;
3356 }
3357
3358 /* Close FD on the target. Return 0, or -1 if an error occurs
3359 (and set *TARGET_ERRNO). */
3360 int
3361 target_fileio_close (int fd, int *target_errno)
3362 {
3363 struct target_ops *t;
3364
3365 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3366 {
3367 if (t->to_fileio_close != NULL)
3368 {
3369 int ret = t->to_fileio_close (fd, target_errno);
3370
3371 if (targetdebug)
3372 fprintf_unfiltered (gdb_stdlog,
3373 "target_fileio_close (%d) = %d (%d)\n",
3374 fd, ret, ret != -1 ? 0 : *target_errno);
3375 return ret;
3376 }
3377 }
3378
3379 *target_errno = FILEIO_ENOSYS;
3380 return -1;
3381 }
3382
3383 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3384 occurs (and set *TARGET_ERRNO). */
3385 int
3386 target_fileio_unlink (const char *filename, int *target_errno)
3387 {
3388 struct target_ops *t;
3389
3390 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3391 {
3392 if (t->to_fileio_unlink != NULL)
3393 {
3394 int ret = t->to_fileio_unlink (filename, target_errno);
3395
3396 if (targetdebug)
3397 fprintf_unfiltered (gdb_stdlog,
3398 "target_fileio_unlink (%s) = %d (%d)\n",
3399 filename, ret, ret != -1 ? 0 : *target_errno);
3400 return ret;
3401 }
3402 }
3403
3404 *target_errno = FILEIO_ENOSYS;
3405 return -1;
3406 }
3407
3408 /* Read value of symbolic link FILENAME on the target. Return a
3409 null-terminated string allocated via xmalloc, or NULL if an error
3410 occurs (and set *TARGET_ERRNO). */
3411 char *
3412 target_fileio_readlink (const char *filename, int *target_errno)
3413 {
3414 struct target_ops *t;
3415
3416 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3417 {
3418 if (t->to_fileio_readlink != NULL)
3419 {
3420 char *ret = t->to_fileio_readlink (filename, target_errno);
3421
3422 if (targetdebug)
3423 fprintf_unfiltered (gdb_stdlog,
3424 "target_fileio_readlink (%s) = %s (%d)\n",
3425 filename, ret? ret : "(nil)",
3426 ret? 0 : *target_errno);
3427 return ret;
3428 }
3429 }
3430
3431 *target_errno = FILEIO_ENOSYS;
3432 return NULL;
3433 }
3434
3435 static void
3436 target_fileio_close_cleanup (void *opaque)
3437 {
3438 int fd = *(int *) opaque;
3439 int target_errno;
3440
3441 target_fileio_close (fd, &target_errno);
3442 }
3443
3444 /* Read target file FILENAME. Store the result in *BUF_P and
3445 return the size of the transferred data. PADDING additional bytes are
3446 available in *BUF_P. This is a helper function for
3447 target_fileio_read_alloc; see the declaration of that function for more
3448 information. */
3449
3450 static LONGEST
3451 target_fileio_read_alloc_1 (const char *filename,
3452 gdb_byte **buf_p, int padding)
3453 {
3454 struct cleanup *close_cleanup;
3455 size_t buf_alloc, buf_pos;
3456 gdb_byte *buf;
3457 LONGEST n;
3458 int fd;
3459 int target_errno;
3460
3461 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
3462 if (fd == -1)
3463 return -1;
3464
3465 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
3466
3467 /* Start by reading up to 4K at a time. The target will throttle
3468 this number down if necessary. */
3469 buf_alloc = 4096;
3470 buf = xmalloc (buf_alloc);
3471 buf_pos = 0;
3472 while (1)
3473 {
3474 n = target_fileio_pread (fd, &buf[buf_pos],
3475 buf_alloc - buf_pos - padding, buf_pos,
3476 &target_errno);
3477 if (n < 0)
3478 {
3479 /* An error occurred. */
3480 do_cleanups (close_cleanup);
3481 xfree (buf);
3482 return -1;
3483 }
3484 else if (n == 0)
3485 {
3486 /* Read all there was. */
3487 do_cleanups (close_cleanup);
3488 if (buf_pos == 0)
3489 xfree (buf);
3490 else
3491 *buf_p = buf;
3492 return buf_pos;
3493 }
3494
3495 buf_pos += n;
3496
3497 /* If the buffer is filling up, expand it. */
3498 if (buf_alloc < buf_pos * 2)
3499 {
3500 buf_alloc *= 2;
3501 buf = xrealloc (buf, buf_alloc);
3502 }
3503
3504 QUIT;
3505 }
3506 }
3507
3508 /* Read target file FILENAME. Store the result in *BUF_P and return
3509 the size of the transferred data. See the declaration in "target.h"
3510 for more information about the return value. */
3511
3512 LONGEST
3513 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3514 {
3515 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3516 }
3517
3518 /* Read target file FILENAME. The result is NUL-terminated and
3519 returned as a string, allocated using xmalloc. If an error occurs
3520 or the transfer is unsupported, NULL is returned. Empty objects
3521 are returned as allocated but empty strings. A warning is issued
3522 if the result contains any embedded NUL bytes. */
3523
3524 char *
3525 target_fileio_read_stralloc (const char *filename)
3526 {
3527 gdb_byte *buffer;
3528 LONGEST i, transferred;
3529
3530 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3531
3532 if (transferred < 0)
3533 return NULL;
3534
3535 if (transferred == 0)
3536 return xstrdup ("");
3537
3538 buffer[transferred] = 0;
3539
3540 /* Check for embedded NUL bytes; but allow trailing NULs. */
3541 for (i = strlen (buffer); i < transferred; i++)
3542 if (buffer[i] != 0)
3543 {
3544 warning (_("target file %s "
3545 "contained unexpected null characters"),
3546 filename);
3547 break;
3548 }
3549
3550 return (char *) buffer;
3551 }
3552
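/* As an illustration (the PID in the path is only a placeholder),
   reading a /proc file from the target and printing it:

     char *maps = target_fileio_read_stralloc ("/proc/1234/maps");

     if (maps != NULL)
       {
         puts_filtered (maps);
         xfree (maps);
       }
*/
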
3553
3554 static int
3555 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3556 {
3557 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3558 }
3559
3560 static int
3561 default_watchpoint_addr_within_range (struct target_ops *target,
3562 CORE_ADDR addr,
3563 CORE_ADDR start, int length)
3564 {
3565 return addr >= start && addr < start + length;
3566 }
3567
3568 static struct gdbarch *
3569 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3570 {
3571 return target_gdbarch ();
3572 }
3573
3574 static int
3575 return_zero (void)
3576 {
3577 return 0;
3578 }
3579
3580 static int
3581 return_one (void)
3582 {
3583 return 1;
3584 }
3585
3586 static int
3587 return_minus_one (void)
3588 {
3589 return -1;
3590 }
3591
3592 /* Find a single runnable target in the stack and return it. If for
3593 some reason there is more than one, return NULL. */
3594
3595 struct target_ops *
3596 find_run_target (void)
3597 {
3598 struct target_ops **t;
3599 struct target_ops *runable = NULL;
3600 int count;
3601
3602 count = 0;
3603
3604 for (t = target_structs; t < target_structs + target_struct_size; ++t)
3605 {
3606 if ((*t)->to_can_run && target_can_run (*t))
3607 {
3608 runable = *t;
3609 ++count;
3610 }
3611 }
3612
3613 return (count == 1 ? runable : NULL);
3614 }
3615
3616 /*
3617 * Find the next target down the stack from the specified target.
3618 */
3619
3620 struct target_ops *
3621 find_target_beneath (struct target_ops *t)
3622 {
3623 return t->beneath;
3624 }
3625
3626 \f
3627 /* The inferior process has died. Long live the inferior! */
3628
3629 void
3630 generic_mourn_inferior (void)
3631 {
3632 ptid_t ptid;
3633
3634 ptid = inferior_ptid;
3635 inferior_ptid = null_ptid;
3636
3637 /* Mark breakpoints uninserted in case something tries to delete a
3638 breakpoint while we delete the inferior's threads (which would
3639 fail, since the inferior is long gone). */
3640 mark_breakpoints_out ();
3641
3642 if (!ptid_equal (ptid, null_ptid))
3643 {
3644 int pid = ptid_get_pid (ptid);
3645 exit_inferior (pid);
3646 }
3647
3648 /* Note this wipes step-resume breakpoints, so needs to be done
3649 after exit_inferior, which ends up referencing the step-resume
3650 breakpoints through clear_thread_inferior_resources. */
3651 breakpoint_init_inferior (inf_exited);
3652
3653 registers_changed ();
3654
3655 reopen_exec_file ();
3656 reinit_frame_cache ();
3657
3658 if (deprecated_detach_hook)
3659 deprecated_detach_hook ();
3660 }
3661 \f
3662 /* Convert a normal process ID to a string. Returns the string in a
3663 static buffer. */
3664
3665 char *
3666 normal_pid_to_str (ptid_t ptid)
3667 {
3668 static char buf[32];
3669
3670 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3671 return buf;
3672 }
3673
3674 static char *
3675 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3676 {
3677 return normal_pid_to_str (ptid);
3678 }
3679
3680 /* Error-catcher for target_find_memory_regions. */
3681 static int
3682 dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
3683 {
3684 error (_("Command not implemented for this target."));
3685 return 0;
3686 }
3687
3688 /* Error-catcher for target_make_corefile_notes. */
3689 static char *
3690 dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
3691 {
3692 error (_("Command not implemented for this target."));
3693 return NULL;
3694 }
3695
3696 /* Error-catcher for target_get_bookmark. */
3697 static gdb_byte *
3698 dummy_get_bookmark (char *ignore1, int ignore2)
3699 {
3700 tcomplain ();
3701 return NULL;
3702 }
3703
3704 /* Error-catcher for target_goto_bookmark. */
3705 static void
3706 dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
3707 {
3708 tcomplain ();
3709 }
3710
3711 /* Set up the handful of non-empty slots needed by the dummy target
3712 vector. */
3713
3714 static void
3715 init_dummy_target (void)
3716 {
3717 dummy_target.to_shortname = "None";
3718 dummy_target.to_longname = "None";
3719 dummy_target.to_doc = "";
3720 dummy_target.to_attach = find_default_attach;
3721 dummy_target.to_detach =
3722 (void (*)(struct target_ops *, char *, int))target_ignore;
3723 dummy_target.to_create_inferior = find_default_create_inferior;
3724 dummy_target.to_can_async_p = find_default_can_async_p;
3725 dummy_target.to_is_async_p = find_default_is_async_p;
3726 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
3727 dummy_target.to_supports_disable_randomization
3728 = find_default_supports_disable_randomization;
3729 dummy_target.to_pid_to_str = dummy_pid_to_str;
3730 dummy_target.to_stratum = dummy_stratum;
3731 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
3732 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
3733 dummy_target.to_get_bookmark = dummy_get_bookmark;
3734 dummy_target.to_goto_bookmark = dummy_goto_bookmark;
3735 dummy_target.to_xfer_partial = default_xfer_partial;
3736 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
3737 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
3738 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
3739 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
3740 dummy_target.to_has_execution
3741 = (int (*) (struct target_ops *, ptid_t)) return_zero;
3742 dummy_target.to_stopped_by_watchpoint = return_zero;
3743 dummy_target.to_stopped_data_address =
3744 (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
3745 dummy_target.to_magic = OPS_MAGIC;
3746 }
3747 \f
3748 static void
3749 debug_to_open (char *args, int from_tty)
3750 {
3751 debug_target.to_open (args, from_tty);
3752
3753 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3754 }
3755
3756 void
3757 target_close (struct target_ops *targ, int quitting)
3758 {
3759 if (targ->to_xclose != NULL)
3760 targ->to_xclose (targ, quitting);
3761 else if (targ->to_close != NULL)
3762 targ->to_close (quitting);
3763
3764 if (targetdebug)
3765 fprintf_unfiltered (gdb_stdlog, "target_close (%d)\n", quitting);
3766 }
3767
3768 void
3769 target_attach (char *args, int from_tty)
3770 {
3771 struct target_ops *t;
3772
3773 for (t = current_target.beneath; t != NULL; t = t->beneath)
3774 {
3775 if (t->to_attach != NULL)
3776 {
3777 t->to_attach (t, args, from_tty);
3778 if (targetdebug)
3779 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3780 args, from_tty);
3781 return;
3782 }
3783 }
3784
3785 internal_error (__FILE__, __LINE__,
3786 _("could not find a target to attach"));
3787 }
3788
3789 int
3790 target_thread_alive (ptid_t ptid)
3791 {
3792 struct target_ops *t;
3793
3794 for (t = current_target.beneath; t != NULL; t = t->beneath)
3795 {
3796 if (t->to_thread_alive != NULL)
3797 {
3798 int retval;
3799
3800 retval = t->to_thread_alive (t, ptid);
3801 if (targetdebug)
3802 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3803 PIDGET (ptid), retval);
3804
3805 return retval;
3806 }
3807 }
3808
3809 return 0;
3810 }
3811
3812 void
3813 target_find_new_threads (void)
3814 {
3815 struct target_ops *t;
3816
3817 for (t = current_target.beneath; t != NULL; t = t->beneath)
3818 {
3819 if (t->to_find_new_threads != NULL)
3820 {
3821 t->to_find_new_threads (t);
3822 if (targetdebug)
3823 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3824
3825 return;
3826 }
3827 }
3828 }
3829
3830 void
3831 target_stop (ptid_t ptid)
3832 {
3833 if (!may_stop)
3834 {
3835 warning (_("May not interrupt or stop the target, ignoring attempt"));
3836 return;
3837 }
3838
3839 (*current_target.to_stop) (ptid);
3840 }
3841
3842 static void
3843 debug_to_post_attach (int pid)
3844 {
3845 debug_target.to_post_attach (pid);
3846
3847 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3848 }
3849
3850 /* Return a pretty printed form of target_waitstatus.
3851 Space for the result is malloc'd, caller must free. */
3852
3853 char *
3854 target_waitstatus_to_string (const struct target_waitstatus *ws)
3855 {
3856 const char *kind_str = "status->kind = ";
3857
3858 switch (ws->kind)
3859 {
3860 case TARGET_WAITKIND_EXITED:
3861 return xstrprintf ("%sexited, status = %d",
3862 kind_str, ws->value.integer);
3863 case TARGET_WAITKIND_STOPPED:
3864 return xstrprintf ("%sstopped, signal = %s",
3865 kind_str, gdb_signal_to_name (ws->value.sig));
3866 case TARGET_WAITKIND_SIGNALLED:
3867 return xstrprintf ("%ssignalled, signal = %s",
3868 kind_str, gdb_signal_to_name (ws->value.sig));
3869 case TARGET_WAITKIND_LOADED:
3870 return xstrprintf ("%sloaded", kind_str);
3871 case TARGET_WAITKIND_FORKED:
3872 return xstrprintf ("%sforked", kind_str);
3873 case TARGET_WAITKIND_VFORKED:
3874 return xstrprintf ("%svforked", kind_str);
3875 case TARGET_WAITKIND_EXECD:
3876 return xstrprintf ("%sexecd", kind_str);
3877 case TARGET_WAITKIND_VFORK_DONE:
3878 return xstrprintf ("%svfork-done", kind_str);
3879 case TARGET_WAITKIND_SYSCALL_ENTRY:
3880 return xstrprintf ("%sentered syscall", kind_str);
3881 case TARGET_WAITKIND_SYSCALL_RETURN:
3882 return xstrprintf ("%sexited syscall", kind_str);
3883 case TARGET_WAITKIND_SPURIOUS:
3884 return xstrprintf ("%sspurious", kind_str);
3885 case TARGET_WAITKIND_IGNORE:
3886 return xstrprintf ("%signore", kind_str);
3887 case TARGET_WAITKIND_NO_HISTORY:
3888 return xstrprintf ("%sno-history", kind_str);
3889 case TARGET_WAITKIND_NO_RESUMED:
3890 return xstrprintf ("%sno-resumed", kind_str);
3891 default:
3892 return xstrprintf ("%sunknown???", kind_str);
3893 }
3894 }
3895
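/* For instance (sketch), a debug trace of a wait status WS uses the
   malloc'd string and then frees it, much like target_wait above:

     char *str = target_waitstatus_to_string (&ws);

     fprintf_unfiltered (gdb_stdlog, "%s\n", str);
     xfree (str);
*/
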
3896 /* Concatenate ELEM to LIST, a comma-separated list, and return the
3897 result. The LIST incoming argument is released. */
3898
3899 static char *
3900 str_comma_list_concat_elem (char *list, const char *elem)
3901 {
3902 if (list == NULL)
3903 return xstrdup (elem);
3904 else
3905 return reconcat (list, list, ", ", elem, (char *) NULL);
3906 }
3907
3908 /* Helper for target_options_to_string. If OPT is present in
3909 TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET.
3910 Returns the new resulting string. OPT is removed from
3911 TARGET_OPTIONS. */
3912
3913 static char *
3914 do_option (int *target_options, char *ret,
3915 int opt, char *opt_str)
3916 {
3917 if ((*target_options & opt) != 0)
3918 {
3919 ret = str_comma_list_concat_elem (ret, opt_str);
3920 *target_options &= ~opt;
3921 }
3922
3923 return ret;
3924 }
3925
3926 char *
3927 target_options_to_string (int target_options)
3928 {
3929 char *ret = NULL;
3930
3931 #define DO_TARG_OPTION(OPT) \
3932 ret = do_option (&target_options, ret, OPT, #OPT)
3933
3934 DO_TARG_OPTION (TARGET_WNOHANG);
3935
3936 if (target_options != 0)
3937 ret = str_comma_list_concat_elem (ret, "unknown???");
3938
3939 if (ret == NULL)
3940 ret = xstrdup ("");
3941 return ret;
3942 }
3943
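/* Usage sketch (illustrative, values assumed): the result is
   heap-allocated (via xstrdup/reconcat), so the caller must free it:

     char *opts = target_options_to_string (TARGET_WNOHANG);
     fprintf_unfiltered (gdb_stdlog, "options = [%s]\n", opts);
     xfree (opts);

   With TARGET_WNOHANG set this yields "TARGET_WNOHANG"; any
   unrecognized bit is reported as "unknown???".  */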
3944 static void
3945 debug_print_register (const char * func,
3946 struct regcache *regcache, int regno)
3947 {
3948 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3949
3950 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3951 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3952 && gdbarch_register_name (gdbarch, regno) != NULL
3953 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3954 fprintf_unfiltered (gdb_stdlog, "(%s)",
3955 gdbarch_register_name (gdbarch, regno));
3956 else
3957 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3958 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3959 {
3960 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3961 int i, size = register_size (gdbarch, regno);
3962 unsigned char buf[MAX_REGISTER_SIZE];
3963
3964 regcache_raw_collect (regcache, regno, buf);
3965 fprintf_unfiltered (gdb_stdlog, " = ");
3966 for (i = 0; i < size; i++)
3967 {
3968 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3969 }
3970 if (size <= sizeof (LONGEST))
3971 {
3972 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
3973
3974 fprintf_unfiltered (gdb_stdlog, " %s %s",
3975 core_addr_to_string_nz (val), plongest (val));
3976 }
3977 }
3978 fprintf_unfiltered (gdb_stdlog, "\n");
3979 }
3980
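/* For illustration (register name and value assumed), the routine
   above would log a 4-byte little-endian register holding 0x12345678
   roughly as:

     target_fetch_registers (eax) = 78563412 0x12345678 305419896

   i.e. the raw bytes in target order, then the value rendered by
   core_addr_to_string_nz and plongest.  */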
3981 void
3982 target_fetch_registers (struct regcache *regcache, int regno)
3983 {
3984 struct target_ops *t;
3985
3986 for (t = current_target.beneath; t != NULL; t = t->beneath)
3987 {
3988 if (t->to_fetch_registers != NULL)
3989 {
3990 t->to_fetch_registers (t, regcache, regno);
3991 if (targetdebug)
3992 debug_print_register ("target_fetch_registers", regcache, regno);
3993 return;
3994 }
3995 }
3996 }
3997
3998 void
3999 target_store_registers (struct regcache *regcache, int regno)
4000 {
4001 struct target_ops *t;
4002
4003 if (!may_write_registers)
4004 error (_("Writing to registers is not allowed (regno %d)"), regno);
4005
4006 for (t = current_target.beneath; t != NULL; t = t->beneath)
4007 {
4008 if (t->to_store_registers != NULL)
4009 {
4010 t->to_store_registers (t, regcache, regno);
4011 if (targetdebug)
4012 {
4013 debug_print_register ("target_store_registers", regcache, regno);
4014 }
4015 return;
4016 }
4017 }
4018
4019 noprocess ();
4020 }
4021
4022 int
4023 target_core_of_thread (ptid_t ptid)
4024 {
4025 struct target_ops *t;
4026
4027 for (t = current_target.beneath; t != NULL; t = t->beneath)
4028 {
4029 if (t->to_core_of_thread != NULL)
4030 {
4031 int retval = t->to_core_of_thread (t, ptid);
4032
4033 if (targetdebug)
4034 fprintf_unfiltered (gdb_stdlog,
4035 "target_core_of_thread (%d) = %d\n",
4036 PIDGET (ptid), retval);
4037 return retval;
4038 }
4039 }
4040
4041 return -1;
4042 }
4043
4044 int
4045 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4046 {
4047 struct target_ops *t;
4048
4049 for (t = current_target.beneath; t != NULL; t = t->beneath)
4050 {
4051 if (t->to_verify_memory != NULL)
4052 {
4053 int retval = t->to_verify_memory (t, data, memaddr, size);
4054
4055 if (targetdebug)
4056 fprintf_unfiltered (gdb_stdlog,
4057 "target_verify_memory (%s, %s) = %d\n",
4058 paddress (target_gdbarch (), memaddr),
4059 pulongest (size),
4060 retval);
4061 return retval;
4062 }
4063 }
4064
4065 tcomplain ();
4066 }
4067
4068 /* The documentation for this function is in its prototype declaration in
4069 target.h. */
4070
4071 int
4072 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4073 {
4074 struct target_ops *t;
4075
4076 for (t = current_target.beneath; t != NULL; t = t->beneath)
4077 if (t->to_insert_mask_watchpoint != NULL)
4078 {
4079 int ret;
4080
4081 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4082
4083 if (targetdebug)
4084 fprintf_unfiltered (gdb_stdlog, "\
4085 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4086 core_addr_to_string (addr),
4087 core_addr_to_string (mask), rw, ret);
4088
4089 return ret;
4090 }
4091
4092 return 1;
4093 }
4094
4095 /* The documentation for this function is in its prototype declaration in
4096 target.h. */
4097
4098 int
4099 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4100 {
4101 struct target_ops *t;
4102
4103 for (t = current_target.beneath; t != NULL; t = t->beneath)
4104 if (t->to_remove_mask_watchpoint != NULL)
4105 {
4106 int ret;
4107
4108 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4109
4110 if (targetdebug)
4111 fprintf_unfiltered (gdb_stdlog, "\
4112 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4113 core_addr_to_string (addr),
4114 core_addr_to_string (mask), rw, ret);
4115
4116 return ret;
4117 }
4118
4119 return 1;
4120 }
4121
4122 /* The documentation for this function is in its prototype declaration
4123 in target.h. */
4124
4125 int
4126 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4127 {
4128 struct target_ops *t;
4129
4130 for (t = current_target.beneath; t != NULL; t = t->beneath)
4131 if (t->to_masked_watch_num_registers != NULL)
4132 return t->to_masked_watch_num_registers (t, addr, mask);
4133
4134 return -1;
4135 }
4136
4137 /* The documentation for this function is in its prototype declaration
4138 in target.h. */
4139
4140 int
4141 target_ranged_break_num_registers (void)
4142 {
4143 struct target_ops *t;
4144
4145 for (t = current_target.beneath; t != NULL; t = t->beneath)
4146 if (t->to_ranged_break_num_registers != NULL)
4147 return t->to_ranged_break_num_registers (t);
4148
4149 return -1;
4150 }
4151
4152 static void
4153 debug_to_prepare_to_store (struct regcache *regcache)
4154 {
4155 debug_target.to_prepare_to_store (regcache);
4156
4157 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4158 }
4159
4160 static int
4161 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
4162 int write, struct mem_attrib *attrib,
4163 struct target_ops *target)
4164 {
4165 int retval;
4166
4167 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
4168 attrib, target);
4169
4170 fprintf_unfiltered (gdb_stdlog,
4171 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4172 paddress (target_gdbarch (), memaddr), len,
4173 write ? "write" : "read", retval);
4174
4175 if (retval > 0)
4176 {
4177 int i;
4178
4179 fputs_unfiltered (", bytes =", gdb_stdlog);
4180 for (i = 0; i < retval; i++)
4181 {
4182 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
4183 {
4184 if (targetdebug < 2 && i > 0)
4185 {
4186 fprintf_unfiltered (gdb_stdlog, " ...");
4187 break;
4188 }
4189 fprintf_unfiltered (gdb_stdlog, "\n");
4190 }
4191
4192 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
4193 }
4194 }
4195
4196 fputc_unfiltered ('\n', gdb_stdlog);
4197
4198 return retval;
4199 }
4200
4201 static void
4202 debug_to_files_info (struct target_ops *target)
4203 {
4204 debug_target.to_files_info (target);
4205
4206 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4207 }
4208
4209 static int
4210 debug_to_insert_breakpoint (struct gdbarch *gdbarch,
4211 struct bp_target_info *bp_tgt)
4212 {
4213 int retval;
4214
4215 retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
4216
4217 fprintf_unfiltered (gdb_stdlog,
4218 "target_insert_breakpoint (%s, xxx) = %ld\n",
4219 core_addr_to_string (bp_tgt->placed_address),
4220 (unsigned long) retval);
4221 return retval;
4222 }
4223
4224 static int
4225 debug_to_remove_breakpoint (struct gdbarch *gdbarch,
4226 struct bp_target_info *bp_tgt)
4227 {
4228 int retval;
4229
4230 retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
4231
4232 fprintf_unfiltered (gdb_stdlog,
4233 "target_remove_breakpoint (%s, xxx) = %ld\n",
4234 core_addr_to_string (bp_tgt->placed_address),
4235 (unsigned long) retval);
4236 return retval;
4237 }
4238
4239 static int
4240 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
4241 {
4242 int retval;
4243
4244 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
4245
4246 fprintf_unfiltered (gdb_stdlog,
4247 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4248 (unsigned long) type,
4249 (unsigned long) cnt,
4250 (unsigned long) from_tty,
4251 (unsigned long) retval);
4252 return retval;
4253 }
4254
4255 static int
4256 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
4257 {
4258 CORE_ADDR retval;
4259
4260 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
4261
4262 fprintf_unfiltered (gdb_stdlog,
4263 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4264 core_addr_to_string (addr), (unsigned long) len,
4265 core_addr_to_string (retval));
4266 return retval;
4267 }
4268
4269 static int
4270 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
4271 struct expression *cond)
4272 {
4273 int retval;
4274
4275 retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
4276 rw, cond);
4277
4278 fprintf_unfiltered (gdb_stdlog,
4279 "target_can_accel_watchpoint_condition "
4280 "(%s, %d, %d, %s) = %ld\n",
4281 core_addr_to_string (addr), len, rw,
4282 host_address_to_string (cond), (unsigned long) retval);
4283 return retval;
4284 }
4285
4286 static int
4287 debug_to_stopped_by_watchpoint (void)
4288 {
4289 int retval;
4290
4291 retval = debug_target.to_stopped_by_watchpoint ();
4292
4293 fprintf_unfiltered (gdb_stdlog,
4294 "target_stopped_by_watchpoint () = %ld\n",
4295 (unsigned long) retval);
4296 return retval;
4297 }
4298
4299 static int
4300 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4301 {
4302 int retval;
4303
4304 retval = debug_target.to_stopped_data_address (target, addr);
4305
4306 fprintf_unfiltered (gdb_stdlog,
4307 "target_stopped_data_address ([%s]) = %ld\n",
4308 core_addr_to_string (*addr),
4309 (unsigned long)retval);
4310 return retval;
4311 }
4312
4313 static int
4314 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4315 CORE_ADDR addr,
4316 CORE_ADDR start, int length)
4317 {
4318 int retval;
4319
4320 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4321 start, length);
4322
4323 fprintf_unfiltered (gdb_stdlog,
4324 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4325 core_addr_to_string (addr), core_addr_to_string (start),
4326 length, retval);
4327 return retval;
4328 }
4329
4330 static int
4331 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
4332 struct bp_target_info *bp_tgt)
4333 {
4334 int retval;
4335
4336 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
4337
4338 fprintf_unfiltered (gdb_stdlog,
4339 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4340 core_addr_to_string (bp_tgt->placed_address),
4341 (unsigned long) retval);
4342 return retval;
4343 }
4344
4345 static int
4346 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
4347 struct bp_target_info *bp_tgt)
4348 {
4349 int retval;
4350
4351 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
4352
4353 fprintf_unfiltered (gdb_stdlog,
4354 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4355 core_addr_to_string (bp_tgt->placed_address),
4356 (unsigned long) retval);
4357 return retval;
4358 }
4359
4360 static int
4361 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
4362 struct expression *cond)
4363 {
4364 int retval;
4365
4366 retval = debug_target.to_insert_watchpoint (addr, len, type, cond);
4367
4368 fprintf_unfiltered (gdb_stdlog,
4369 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4370 core_addr_to_string (addr), len, type,
4371 host_address_to_string (cond), (unsigned long) retval);
4372 return retval;
4373 }
4374
4375 static int
4376 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
4377 struct expression *cond)
4378 {
4379 int retval;
4380
4381 retval = debug_target.to_remove_watchpoint (addr, len, type, cond);
4382
4383 fprintf_unfiltered (gdb_stdlog,
4384 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4385 core_addr_to_string (addr), len, type,
4386 host_address_to_string (cond), (unsigned long) retval);
4387 return retval;
4388 }
4389
4390 static void
4391 debug_to_terminal_init (void)
4392 {
4393 debug_target.to_terminal_init ();
4394
4395 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4396 }
4397
4398 static void
4399 debug_to_terminal_inferior (void)
4400 {
4401 debug_target.to_terminal_inferior ();
4402
4403 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4404 }
4405
4406 static void
4407 debug_to_terminal_ours_for_output (void)
4408 {
4409 debug_target.to_terminal_ours_for_output ();
4410
4411 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4412 }
4413
4414 static void
4415 debug_to_terminal_ours (void)
4416 {
4417 debug_target.to_terminal_ours ();
4418
4419 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4420 }
4421
4422 static void
4423 debug_to_terminal_save_ours (void)
4424 {
4425 debug_target.to_terminal_save_ours ();
4426
4427 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4428 }
4429
4430 static void
4431 debug_to_terminal_info (char *arg, int from_tty)
4432 {
4433 debug_target.to_terminal_info (arg, from_tty);
4434
4435 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4436 from_tty);
4437 }
4438
4439 static void
4440 debug_to_load (char *args, int from_tty)
4441 {
4442 debug_target.to_load (args, from_tty);
4443
4444 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4445 }
4446
4447 static void
4448 debug_to_post_startup_inferior (ptid_t ptid)
4449 {
4450 debug_target.to_post_startup_inferior (ptid);
4451
4452 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4453 PIDGET (ptid));
4454 }
4455
4456 static int
4457 debug_to_insert_fork_catchpoint (int pid)
4458 {
4459 int retval;
4460
4461 retval = debug_target.to_insert_fork_catchpoint (pid);
4462
4463 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4464 pid, retval);
4465
4466 return retval;
4467 }
4468
4469 static int
4470 debug_to_remove_fork_catchpoint (int pid)
4471 {
4472 int retval;
4473
4474 retval = debug_target.to_remove_fork_catchpoint (pid);
4475
4476 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4477 pid, retval);
4478
4479 return retval;
4480 }
4481
4482 static int
4483 debug_to_insert_vfork_catchpoint (int pid)
4484 {
4485 int retval;
4486
4487 retval = debug_target.to_insert_vfork_catchpoint (pid);
4488
4489 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4490 pid, retval);
4491
4492 return retval;
4493 }
4494
4495 static int
4496 debug_to_remove_vfork_catchpoint (int pid)
4497 {
4498 int retval;
4499
4500 retval = debug_target.to_remove_vfork_catchpoint (pid);
4501
4502 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4503 pid, retval);
4504
4505 return retval;
4506 }
4507
4508 static int
4509 debug_to_insert_exec_catchpoint (int pid)
4510 {
4511 int retval;
4512
4513 retval = debug_target.to_insert_exec_catchpoint (pid);
4514
4515 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4516 pid, retval);
4517
4518 return retval;
4519 }
4520
4521 static int
4522 debug_to_remove_exec_catchpoint (int pid)
4523 {
4524 int retval;
4525
4526 retval = debug_target.to_remove_exec_catchpoint (pid);
4527
4528 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4529 pid, retval);
4530
4531 return retval;
4532 }
4533
4534 static int
4535 debug_to_has_exited (int pid, int wait_status, int *exit_status)
4536 {
4537 int has_exited;
4538
4539 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
4540
4541 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4542 pid, wait_status, *exit_status, has_exited);
4543
4544 return has_exited;
4545 }
4546
4547 static int
4548 debug_to_can_run (void)
4549 {
4550 int retval;
4551
4552 retval = debug_target.to_can_run ();
4553
4554 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4555
4556 return retval;
4557 }
4558
4559 static struct gdbarch *
4560 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4561 {
4562 struct gdbarch *retval;
4563
4564 retval = debug_target.to_thread_architecture (ops, ptid);
4565
4566 fprintf_unfiltered (gdb_stdlog,
4567 "target_thread_architecture (%s) = %s [%s]\n",
4568 target_pid_to_str (ptid),
4569 host_address_to_string (retval),
4570 gdbarch_bfd_arch_info (retval)->printable_name);
4571 return retval;
4572 }
4573
4574 static void
4575 debug_to_stop (ptid_t ptid)
4576 {
4577 debug_target.to_stop (ptid);
4578
4579 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4580 target_pid_to_str (ptid));
4581 }
4582
4583 static void
4584 debug_to_rcmd (char *command,
4585 struct ui_file *outbuf)
4586 {
4587 debug_target.to_rcmd (command, outbuf);
4588 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4589 }
4590
4591 static char *
4592 debug_to_pid_to_exec_file (int pid)
4593 {
4594 char *exec_file;
4595
4596 exec_file = debug_target.to_pid_to_exec_file (pid);
4597
4598 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4599 pid, exec_file);
4600
4601 return exec_file;
4602 }
4603
4604 static void
4605 setup_target_debug (void)
4606 {
4607 memcpy (&debug_target, &current_target, sizeof debug_target);
4608
4609 current_target.to_open = debug_to_open;
4610 current_target.to_post_attach = debug_to_post_attach;
4611 current_target.to_prepare_to_store = debug_to_prepare_to_store;
4612 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
4613 current_target.to_files_info = debug_to_files_info;
4614 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
4615 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
4616 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
4617 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
4618 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
4619 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
4620 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
4621 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
4622 current_target.to_stopped_data_address = debug_to_stopped_data_address;
4623 current_target.to_watchpoint_addr_within_range
4624 = debug_to_watchpoint_addr_within_range;
4625 current_target.to_region_ok_for_hw_watchpoint
4626 = debug_to_region_ok_for_hw_watchpoint;
4627 current_target.to_can_accel_watchpoint_condition
4628 = debug_to_can_accel_watchpoint_condition;
4629 current_target.to_terminal_init = debug_to_terminal_init;
4630 current_target.to_terminal_inferior = debug_to_terminal_inferior;
4631 current_target.to_terminal_ours_for_output
4632 = debug_to_terminal_ours_for_output;
4633 current_target.to_terminal_ours = debug_to_terminal_ours;
4634 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
4635 current_target.to_terminal_info = debug_to_terminal_info;
4636 current_target.to_load = debug_to_load;
4637 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
4638 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
4639 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
4640 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
4641 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
4642 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
4643 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
4644 current_target.to_has_exited = debug_to_has_exited;
4645 current_target.to_can_run = debug_to_can_run;
4646 current_target.to_stop = debug_to_stop;
4647 current_target.to_rcmd = debug_to_rcmd;
4648 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
4649 current_target.to_thread_architecture = debug_to_thread_architecture;
4650 }
4651 \f
4652
4653 static char targ_desc[] =
4654 "Names of targets and files being debugged.\nShows the entire \
4655 stack of targets currently in use (including the exec-file,\n\
4656 core-file, and process, if any), as well as the symbol file name.";
4657
4658 static void
4659 do_monitor_command (char *cmd,
4660 int from_tty)
4661 {
4662 if ((current_target.to_rcmd
4663 == (void (*) (char *, struct ui_file *)) tcomplain)
4664 || (current_target.to_rcmd == debug_to_rcmd
4665 && (debug_target.to_rcmd
4666 == (void (*) (char *, struct ui_file *)) tcomplain)))
4667 error (_("\"monitor\" command not supported by this target."));
4668 target_rcmd (cmd, gdb_stdtarg);
4669 }
4670
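/* Illustrative session (assumed): "monitor" forwards its argument to
   the target's to_rcmd method, so with a remote stub attached one
   might type

     (gdb) monitor help

   and see whatever text the stub writes back on gdb_stdtarg.  A
   target without to_rcmd support reports the error above instead.  */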
4671 /* Print the name of each layer of our target stack. */
4672
4673 static void
4674 maintenance_print_target_stack (char *cmd, int from_tty)
4675 {
4676 struct target_ops *t;
4677
4678 printf_filtered (_("The current target stack is:\n"));
4679
4680 for (t = target_stack; t != NULL; t = t->beneath)
4681 {
4682 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4683 }
4684 }
4685
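/* Example output (illustrative; the exact layers depend on the
   configuration): for a native process the command above might print

     The current target stack is:
       - child (Unix child process)
       - exec (Local exec file)
       - None (None)
*/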
4686 /* Controls whether async mode is permitted. */
4687 int target_async_permitted = 0;
4688
4689 /* The set command writes to this variable. If the inferior is
4690 executing, target_async_permitted is *not* updated. */
4691 static int target_async_permitted_1 = 0;
4692
4693 static void
4694 set_target_async_command (char *args, int from_tty,
4695 struct cmd_list_element *c)
4696 {
4697 if (have_live_inferiors ())
4698 {
4699 target_async_permitted_1 = target_async_permitted;
4700 error (_("Cannot change this setting while the inferior is running."));
4701 }
4702
4703 target_async_permitted = target_async_permitted_1;
4704 }
4705
4706 static void
4707 show_target_async_command (struct ui_file *file, int from_tty,
4708 struct cmd_list_element *c,
4709 const char *value)
4710 {
4711 fprintf_filtered (file,
4712 _("Controlling the inferior in "
4713 "asynchronous mode is %s.\n"), value);
4714 }
4715
4716 /* Temporary copies of permission settings. */
4717
4718 static int may_write_registers_1 = 1;
4719 static int may_write_memory_1 = 1;
4720 static int may_insert_breakpoints_1 = 1;
4721 static int may_insert_tracepoints_1 = 1;
4722 static int may_insert_fast_tracepoints_1 = 1;
4723 static int may_stop_1 = 1;
4724
4725 /* Make the user-set values match the real values again. */
4726
4727 void
4728 update_target_permissions (void)
4729 {
4730 may_write_registers_1 = may_write_registers;
4731 may_write_memory_1 = may_write_memory;
4732 may_insert_breakpoints_1 = may_insert_breakpoints;
4733 may_insert_tracepoints_1 = may_insert_tracepoints;
4734 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4735 may_stop_1 = may_stop;
4736 }
4737
4738 /* This single function handles (most of) the permission flags in the
4739 same way. */
4740
4741 static void
4742 set_target_permissions (char *args, int from_tty,
4743 struct cmd_list_element *c)
4744 {
4745 if (target_has_execution)
4746 {
4747 update_target_permissions ();
4748 error (_("Cannot change this setting while the inferior is running."));
4749 }
4750
4751 /* Make the real values match the user-changed values. */
4752 may_write_registers = may_write_registers_1;
4753 may_insert_breakpoints = may_insert_breakpoints_1;
4754 may_insert_tracepoints = may_insert_tracepoints_1;
4755 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4756 may_stop = may_stop_1;
4757 update_observer_mode ();
4758 }
4759
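/* Usage sketch (assumed session): these setters back the "set may-*"
   commands registered in initialize_targets below, e.g.

     (gdb) set may-write-registers off
     (gdb) set $eax = 0
     Writing to registers is not allowed (regno ...)

   The regno shown depends on the register being written.  */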
4760 /* Set memory write permission independently of observer mode. */
4761
4762 static void
4763 set_write_memory_permission (char *args, int from_tty,
4764 struct cmd_list_element *c)
4765 {
4766 /* Make the real values match the user-changed values. */
4767 may_write_memory = may_write_memory_1;
4768 update_observer_mode ();
4769 }
4770
4771
4772 void
4773 initialize_targets (void)
4774 {
4775 init_dummy_target ();
4776 push_target (&dummy_target);
4777
4778 add_info ("target", target_info, targ_desc);
4779 add_info ("files", target_info, targ_desc);
4780
4781 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
4782 Set target debugging."), _("\
4783 Show target debugging."), _("\
4784 When non-zero, target debugging is enabled. Higher numbers are more\n\
4785 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
4786 command."),
4787 NULL,
4788 show_targetdebug,
4789 &setdebuglist, &showdebuglist);
4790
4791 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
4792 &trust_readonly, _("\
4793 Set mode for reading from readonly sections."), _("\
4794 Show mode for reading from readonly sections."), _("\
4795 When this mode is on, memory reads from readonly sections (such as .text)\n\
4796 will be read from the object file instead of from the target. This will\n\
4797 result in significant performance improvement for remote targets."),
4798 NULL,
4799 show_trust_readonly,
4800 &setlist, &showlist);
4801
4802 add_com ("monitor", class_obscure, do_monitor_command,
4803 _("Send a command to the remote monitor (remote targets only)."));
4804
4805 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
4806 _("Print the name of each layer of the internal target stack."),
4807 &maintenanceprintlist);
4808
4809 add_setshow_boolean_cmd ("target-async", no_class,
4810 &target_async_permitted_1, _("\
4811 Set whether gdb controls the inferior in asynchronous mode."), _("\
4812 Show whether gdb controls the inferior in asynchronous mode."), _("\
4813 Tells gdb whether to control the inferior in asynchronous mode."),
4814 set_target_async_command,
4815 show_target_async_command,
4816 &setlist,
4817 &showlist);
4818
4819 add_setshow_boolean_cmd ("stack-cache", class_support,
4820 &stack_cache_enabled_p_1, _("\
4821 Set cache use for stack access."), _("\
4822 Show cache use for stack access."), _("\
4823 When on, use the data cache for all stack access, regardless of any\n\
4824 configured memory regions. This improves remote performance significantly.\n\
4825 By default, caching for stack access is on."),
4826 set_stack_cache_enabled_p,
4827 show_stack_cache_enabled_p,
4828 &setlist, &showlist);
4829
4830 add_setshow_boolean_cmd ("may-write-registers", class_support,
4831 &may_write_registers_1, _("\
4832 Set permission to write into registers."), _("\
4833 Show permission to write into registers."), _("\
4834 When this permission is on, GDB may write into the target's registers.\n\
4835 Otherwise, any sort of write attempt will result in an error."),
4836 set_target_permissions, NULL,
4837 &setlist, &showlist);
4838
4839 add_setshow_boolean_cmd ("may-write-memory", class_support,
4840 &may_write_memory_1, _("\
4841 Set permission to write into target memory."), _("\
4842 Show permission to write into target memory."), _("\
4843 When this permission is on, GDB may write into the target's memory.\n\
4844 Otherwise, any sort of write attempt will result in an error."),
4845 set_write_memory_permission, NULL,
4846 &setlist, &showlist);
4847
4848 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
4849 &may_insert_breakpoints_1, _("\
4850 Set permission to insert breakpoints in the target."), _("\
4851 Show permission to insert breakpoints in the target."), _("\
4852 When this permission is on, GDB may insert breakpoints in the program.\n\
4853 Otherwise, any sort of insertion attempt will result in an error."),
4854 set_target_permissions, NULL,
4855 &setlist, &showlist);
4856
4857 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
4858 &may_insert_tracepoints_1, _("\
4859 Set permission to insert tracepoints in the target."), _("\
4860 Show permission to insert tracepoints in the target."), _("\
4861 When this permission is on, GDB may insert tracepoints in the program.\n\
4862 Otherwise, any sort of insertion attempt will result in an error."),
4863 set_target_permissions, NULL,
4864 &setlist, &showlist);
4865
4866 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
4867 &may_insert_fast_tracepoints_1, _("\
4868 Set permission to insert fast tracepoints in the target."), _("\
4869 Show permission to insert fast tracepoints in the target."), _("\
4870 When this permission is on, GDB may insert fast tracepoints.\n\
4871 Otherwise, any sort of insertion attempt will result in an error."),
4872 set_target_permissions, NULL,
4873 &setlist, &showlist);
4874
4875 add_setshow_boolean_cmd ("may-interrupt", class_support,
4876 &may_stop_1, _("\
4877 Set permission to interrupt or signal the target."), _("\
4878 Show permission to interrupt or signal the target."), _("\
4879 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4880 Otherwise, any attempt to interrupt or stop will be ignored."),
4881 set_target_permissions, NULL,
4882 &setlist, &showlist);
4883
4884
4885 target_dcache = dcache_init ();
4886 }