1 /* Select target systems and architectures at runtime for GDB.
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
5 Contributed by Cygnus Support.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
26 #include "target-dcache.h"
36 #include "gdb_assert.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
48 static void target_info (char *, int);
50 static void default_terminal_info (struct target_ops
*, const char *, int);
52 static int default_watchpoint_addr_within_range (struct target_ops
*,
53 CORE_ADDR
, CORE_ADDR
, int);
55 static int default_region_ok_for_hw_watchpoint (struct target_ops
*,
58 static void default_rcmd (struct target_ops
*, char *, struct ui_file
*);
60 static void tcomplain (void) ATTRIBUTE_NORETURN
;
62 static int nomemory (CORE_ADDR
, char *, int, int, struct target_ops
*);
64 static int return_zero (void);
66 static int return_minus_one (void);
68 static void *return_null (void);
70 void target_ignore (void);
72 static void target_command (char *, int);
74 static struct target_ops
*find_default_run_target (char *);
76 static target_xfer_partial_ftype default_xfer_partial
;
78 static struct gdbarch
*default_thread_architecture (struct target_ops
*ops
,
81 static int dummy_find_memory_regions (struct target_ops
*self
,
82 find_memory_region_ftype ignore1
,
85 static int find_default_can_async_p (struct target_ops
*ignore
);
87 static int find_default_is_async_p (struct target_ops
*ignore
);
89 #include "target-delegates.c"
91 static void init_dummy_target (void);
93 static struct target_ops debug_target
;
95 static void debug_to_open (char *, int);
97 static void debug_to_prepare_to_store (struct target_ops
*self
,
100 static void debug_to_files_info (struct target_ops
*);
102 static int debug_to_insert_breakpoint (struct target_ops
*, struct gdbarch
*,
103 struct bp_target_info
*);
105 static int debug_to_remove_breakpoint (struct target_ops
*, struct gdbarch
*,
106 struct bp_target_info
*);
108 static int debug_to_can_use_hw_breakpoint (struct target_ops
*self
,
111 static int debug_to_insert_hw_breakpoint (struct target_ops
*self
,
113 struct bp_target_info
*);
115 static int debug_to_remove_hw_breakpoint (struct target_ops
*self
,
117 struct bp_target_info
*);
119 static int debug_to_insert_watchpoint (struct target_ops
*self
,
121 struct expression
*);
123 static int debug_to_remove_watchpoint (struct target_ops
*self
,
125 struct expression
*);
127 static int debug_to_stopped_data_address (struct target_ops
*, CORE_ADDR
*);
129 static int debug_to_watchpoint_addr_within_range (struct target_ops
*,
130 CORE_ADDR
, CORE_ADDR
, int);
132 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops
*self
,
135 static int debug_to_can_accel_watchpoint_condition (struct target_ops
*self
,
137 struct expression
*);
139 static void debug_to_terminal_init (struct target_ops
*self
);
141 static void debug_to_terminal_inferior (struct target_ops
*self
);
143 static void debug_to_terminal_ours_for_output (struct target_ops
*self
);
145 static void debug_to_terminal_save_ours (struct target_ops
*self
);
147 static void debug_to_terminal_ours (struct target_ops
*self
);
149 static void debug_to_load (struct target_ops
*self
, char *, int);
151 static int debug_to_can_run (struct target_ops
*self
);
153 static void debug_to_stop (struct target_ops
*self
, ptid_t
);
155 /* Pointer to array of target architecture structures; the size of the
156 array; the current index into the array; the allocated size of the
158 struct target_ops
**target_structs
;
159 unsigned target_struct_size
;
160 unsigned target_struct_allocsize
;
161 #define DEFAULT_ALLOCSIZE 10
163 /* The initial current target, so that there is always a semi-valid
166 static struct target_ops dummy_target
;
168 /* Top of target stack. */
170 static struct target_ops
*target_stack
;
172 /* The target structure we are currently using to talk to a process
173 or file or whatever "inferior" we have. */
175 struct target_ops current_target
;
177 /* Command list for target. */
179 static struct cmd_list_element
*targetlist
= NULL
;
181 /* Nonzero if we should trust readonly sections from the
182 executable when reading memory. */
184 static int trust_readonly
= 0;
186 /* Nonzero if we should show true memory content including
187 memory breakpoint inserted by gdb. */
189 static int show_memory_breakpoints
= 0;
191 /* These globals control whether GDB attempts to perform these
192 operations; they are useful for targets that need to prevent
193 inadvertant disruption, such as in non-stop mode. */
195 int may_write_registers
= 1;
197 int may_write_memory
= 1;
199 int may_insert_breakpoints
= 1;
201 int may_insert_tracepoints
= 1;
203 int may_insert_fast_tracepoints
= 1;
207 /* Non-zero if we want to see trace of target level stuff. */
209 static unsigned int targetdebug
= 0;
/* Show-hook for the "set debug target" variable: report the current
   target-debug level to FILE.  (Reconstructed from a garbled
   extraction; the "static void" header line was missing.)  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
217 static void setup_target_debug (void);
219 /* The user just typed 'target' without the name of a target. */
222 target_command (char *arg
, int from_tty
)
224 fputs_filtered ("Argument required (target name). Try `help target'\n",
228 /* Default target_has_* methods for process_stratum targets. */
231 default_child_has_all_memory (struct target_ops
*ops
)
233 /* If no inferior selected, then we can't read memory here. */
234 if (ptid_equal (inferior_ptid
, null_ptid
))
241 default_child_has_memory (struct target_ops
*ops
)
243 /* If no inferior selected, then we can't read memory here. */
244 if (ptid_equal (inferior_ptid
, null_ptid
))
251 default_child_has_stack (struct target_ops
*ops
)
253 /* If no inferior selected, there's no stack. */
254 if (ptid_equal (inferior_ptid
, null_ptid
))
261 default_child_has_registers (struct target_ops
*ops
)
263 /* Can't read registers from no inferior. */
264 if (ptid_equal (inferior_ptid
, null_ptid
))
271 default_child_has_execution (struct target_ops
*ops
, ptid_t the_ptid
)
273 /* If there's no thread selected, then we can't make it run through
275 if (ptid_equal (the_ptid
, null_ptid
))
283 target_has_all_memory_1 (void)
285 struct target_ops
*t
;
287 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
288 if (t
->to_has_all_memory (t
))
295 target_has_memory_1 (void)
297 struct target_ops
*t
;
299 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
300 if (t
->to_has_memory (t
))
307 target_has_stack_1 (void)
309 struct target_ops
*t
;
311 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
312 if (t
->to_has_stack (t
))
319 target_has_registers_1 (void)
321 struct target_ops
*t
;
323 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
324 if (t
->to_has_registers (t
))
331 target_has_execution_1 (ptid_t the_ptid
)
333 struct target_ops
*t
;
335 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
336 if (t
->to_has_execution (t
, the_ptid
))
343 target_has_execution_current (void)
345 return target_has_execution_1 (inferior_ptid
);
348 /* Complete initialization of T. This ensures that various fields in
349 T are set, if needed by the target implementation. */
352 complete_target_initialization (struct target_ops
*t
)
354 /* Provide default values for all "must have" methods. */
355 if (t
->to_xfer_partial
== NULL
)
356 t
->to_xfer_partial
= default_xfer_partial
;
358 if (t
->to_has_all_memory
== NULL
)
359 t
->to_has_all_memory
= (int (*) (struct target_ops
*)) return_zero
;
361 if (t
->to_has_memory
== NULL
)
362 t
->to_has_memory
= (int (*) (struct target_ops
*)) return_zero
;
364 if (t
->to_has_stack
== NULL
)
365 t
->to_has_stack
= (int (*) (struct target_ops
*)) return_zero
;
367 if (t
->to_has_registers
== NULL
)
368 t
->to_has_registers
= (int (*) (struct target_ops
*)) return_zero
;
370 if (t
->to_has_execution
== NULL
)
371 t
->to_has_execution
= (int (*) (struct target_ops
*, ptid_t
)) return_zero
;
373 install_delegators (t
);
376 /* Add possible target architecture T to the list and add a new
377 command 'target T->to_shortname'. Set COMPLETER as the command's
378 completer if not NULL. */
381 add_target_with_completer (struct target_ops
*t
,
382 completer_ftype
*completer
)
384 struct cmd_list_element
*c
;
386 complete_target_initialization (t
);
390 target_struct_allocsize
= DEFAULT_ALLOCSIZE
;
391 target_structs
= (struct target_ops
**) xmalloc
392 (target_struct_allocsize
* sizeof (*target_structs
));
394 if (target_struct_size
>= target_struct_allocsize
)
396 target_struct_allocsize
*= 2;
397 target_structs
= (struct target_ops
**)
398 xrealloc ((char *) target_structs
,
399 target_struct_allocsize
* sizeof (*target_structs
));
401 target_structs
[target_struct_size
++] = t
;
403 if (targetlist
== NULL
)
404 add_prefix_cmd ("target", class_run
, target_command
, _("\
405 Connect to a target machine or process.\n\
406 The first argument is the type or protocol of the target machine.\n\
407 Remaining arguments are interpreted by the target protocol. For more\n\
408 information on the arguments for a particular protocol, type\n\
409 `help target ' followed by the protocol name."),
410 &targetlist
, "target ", 0, &cmdlist
);
411 c
= add_cmd (t
->to_shortname
, no_class
, t
->to_open
, t
->to_doc
,
413 if (completer
!= NULL
)
414 set_cmd_completer (c
, completer
);
417 /* Add a possible target architecture to the list. */
420 add_target (struct target_ops
*t
)
422 add_target_with_completer (t
, NULL
);
428 add_deprecated_target_alias (struct target_ops
*t
, char *alias
)
430 struct cmd_list_element
*c
;
433 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
435 c
= add_cmd (alias
, no_class
, t
->to_open
, t
->to_doc
, &targetlist
);
436 alt
= xstrprintf ("target %s", t
->to_shortname
);
437 deprecate_cmd (c
, alt
);
450 struct target_ops
*t
;
452 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
453 if (t
->to_kill
!= NULL
)
456 fprintf_unfiltered (gdb_stdlog
, "target_kill ()\n");
466 target_load (char *arg
, int from_tty
)
468 target_dcache_invalidate ();
469 (*current_target
.to_load
) (¤t_target
, arg
, from_tty
);
473 target_create_inferior (char *exec_file
, char *args
,
474 char **env
, int from_tty
)
476 struct target_ops
*t
;
478 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
480 if (t
->to_create_inferior
!= NULL
)
482 t
->to_create_inferior (t
, exec_file
, args
, env
, from_tty
);
484 fprintf_unfiltered (gdb_stdlog
,
485 "target_create_inferior (%s, %s, xxx, %d)\n",
486 exec_file
, args
, from_tty
);
491 internal_error (__FILE__
, __LINE__
,
492 _("could not find a target to create inferior"));
496 target_terminal_inferior (void)
498 /* A background resume (``run&'') should leave GDB in control of the
499 terminal. Use target_can_async_p, not target_is_async_p, since at
500 this point the target is not async yet. However, if sync_execution
501 is not set, we know it will become async prior to resume. */
502 if (target_can_async_p () && !sync_execution
)
505 /* If GDB is resuming the inferior in the foreground, install
506 inferior's terminal modes. */
507 (*current_target
.to_terminal_inferior
) (¤t_target
);
511 nomemory (CORE_ADDR memaddr
, char *myaddr
, int len
, int write
,
512 struct target_ops
*t
)
514 errno
= EIO
; /* Can't read/write this location. */
515 return 0; /* No bytes handled. */
521 error (_("You can't do that when your target is `%s'"),
522 current_target
.to_shortname
);
528 error (_("You can't do that without a process to debug."));
/* Default to_terminal_info method: there is nothing saved to report.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
537 /* A default implementation for the to_get_ada_task_ptid target method.
539 This function builds the PTID by using both LWP and TID as part of
540 the PTID lwp and tid elements. The pid used is the pid of the
544 default_get_ada_task_ptid (struct target_ops
*self
, long lwp
, long tid
)
546 return ptid_build (ptid_get_pid (inferior_ptid
), lwp
, tid
);
549 static enum exec_direction_kind
550 default_execution_direction (struct target_ops
*self
)
552 if (!target_can_execute_reverse
)
554 else if (!target_can_async_p ())
557 gdb_assert_not_reached ("\
558 to_execution_direction must be implemented for reverse async");
561 /* Go through the target stack from top to bottom, copying over zero
562 entries in current_target, then filling in still empty entries. In
563 effect, we are doing class inheritance through the pushed target
566 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
567 is currently implemented, is that it discards any knowledge of
568 which target an inherited method originally belonged to.
569 Consequently, new new target methods should instead explicitly and
570 locally search the target stack for the target that can handle the
574 update_current_target (void)
576 struct target_ops
*t
;
578 /* First, reset current's contents. */
579 memset (¤t_target
, 0, sizeof (current_target
));
581 /* Install the delegators. */
582 install_delegators (¤t_target
);
584 #define INHERIT(FIELD, TARGET) \
585 if (!current_target.FIELD) \
586 current_target.FIELD = (TARGET)->FIELD
588 for (t
= target_stack
; t
; t
= t
->beneath
)
590 INHERIT (to_shortname
, t
);
591 INHERIT (to_longname
, t
);
593 /* Do not inherit to_open. */
594 /* Do not inherit to_close. */
595 /* Do not inherit to_attach. */
596 /* Do not inherit to_post_attach. */
597 INHERIT (to_attach_no_wait
, t
);
598 /* Do not inherit to_detach. */
599 /* Do not inherit to_disconnect. */
600 /* Do not inherit to_resume. */
601 /* Do not inherit to_wait. */
602 /* Do not inherit to_fetch_registers. */
603 /* Do not inherit to_store_registers. */
604 /* Do not inherit to_prepare_to_store. */
605 INHERIT (deprecated_xfer_memory
, t
);
606 /* Do not inherit to_files_info. */
607 /* Do not inherit to_insert_breakpoint. */
608 /* Do not inherit to_remove_breakpoint. */
609 /* Do not inherit to_can_use_hw_breakpoint. */
610 /* Do not inherit to_insert_hw_breakpoint. */
611 /* Do not inherit to_remove_hw_breakpoint. */
612 /* Do not inherit to_ranged_break_num_registers. */
613 /* Do not inherit to_insert_watchpoint. */
614 /* Do not inherit to_remove_watchpoint. */
615 /* Do not inherit to_insert_mask_watchpoint. */
616 /* Do not inherit to_remove_mask_watchpoint. */
617 /* Do not inherit to_stopped_data_address. */
618 INHERIT (to_have_steppable_watchpoint
, t
);
619 INHERIT (to_have_continuable_watchpoint
, t
);
620 /* Do not inherit to_stopped_by_watchpoint. */
621 /* Do not inherit to_watchpoint_addr_within_range. */
622 /* Do not inherit to_region_ok_for_hw_watchpoint. */
623 /* Do not inherit to_can_accel_watchpoint_condition. */
624 /* Do not inherit to_masked_watch_num_registers. */
625 /* Do not inherit to_terminal_init. */
626 /* Do not inherit to_terminal_inferior. */
627 /* Do not inherit to_terminal_ours_for_output. */
628 /* Do not inherit to_terminal_ours. */
629 /* Do not inherit to_terminal_save_ours. */
630 /* Do not inherit to_terminal_info. */
631 /* Do not inherit to_kill. */
632 /* Do not inherit to_load. */
633 /* Do no inherit to_create_inferior. */
634 /* Do not inherit to_post_startup_inferior. */
635 /* Do not inherit to_insert_fork_catchpoint. */
636 /* Do not inherit to_remove_fork_catchpoint. */
637 /* Do not inherit to_insert_vfork_catchpoint. */
638 /* Do not inherit to_remove_vfork_catchpoint. */
639 /* Do not inherit to_follow_fork. */
640 /* Do not inherit to_insert_exec_catchpoint. */
641 /* Do not inherit to_remove_exec_catchpoint. */
642 /* Do not inherit to_set_syscall_catchpoint. */
643 /* Do not inherit to_has_exited. */
644 /* Do not inherit to_mourn_inferior. */
645 INHERIT (to_can_run
, t
);
646 /* Do not inherit to_pass_signals. */
647 /* Do not inherit to_program_signals. */
648 /* Do not inherit to_thread_alive. */
649 /* Do not inherit to_find_new_threads. */
650 /* Do not inherit to_pid_to_str. */
651 /* Do not inherit to_extra_thread_info. */
652 /* Do not inherit to_thread_name. */
653 INHERIT (to_stop
, t
);
654 /* Do not inherit to_xfer_partial. */
655 /* Do not inherit to_rcmd. */
656 /* Do not inherit to_pid_to_exec_file. */
657 /* Do not inherit to_log_command. */
658 INHERIT (to_stratum
, t
);
659 /* Do not inherit to_has_all_memory. */
660 /* Do not inherit to_has_memory. */
661 /* Do not inherit to_has_stack. */
662 /* Do not inherit to_has_registers. */
663 /* Do not inherit to_has_execution. */
664 INHERIT (to_has_thread_control
, t
);
665 /* Do not inherit to_can_async_p. */
666 /* Do not inherit to_is_async_p. */
667 /* Do not inherit to_async. */
668 /* Do not inherit to_find_memory_regions. */
669 INHERIT (to_make_corefile_notes
, t
);
670 INHERIT (to_get_bookmark
, t
);
671 INHERIT (to_goto_bookmark
, t
);
672 /* Do not inherit to_get_thread_local_address. */
673 INHERIT (to_can_execute_reverse
, t
);
674 INHERIT (to_execution_direction
, t
);
675 INHERIT (to_thread_architecture
, t
);
676 /* Do not inherit to_read_description. */
677 INHERIT (to_get_ada_task_ptid
, t
);
678 /* Do not inherit to_search_memory. */
679 INHERIT (to_supports_multi_process
, t
);
680 INHERIT (to_supports_enable_disable_tracepoint
, t
);
681 INHERIT (to_supports_string_tracing
, t
);
682 INHERIT (to_trace_init
, t
);
683 INHERIT (to_download_tracepoint
, t
);
684 INHERIT (to_can_download_tracepoint
, t
);
685 INHERIT (to_download_trace_state_variable
, t
);
686 INHERIT (to_enable_tracepoint
, t
);
687 INHERIT (to_disable_tracepoint
, t
);
688 INHERIT (to_trace_set_readonly_regions
, t
);
689 INHERIT (to_trace_start
, t
);
690 INHERIT (to_get_trace_status
, t
);
691 INHERIT (to_get_tracepoint_status
, t
);
692 INHERIT (to_trace_stop
, t
);
693 INHERIT (to_trace_find
, t
);
694 INHERIT (to_get_trace_state_variable_value
, t
);
695 INHERIT (to_save_trace_data
, t
);
696 INHERIT (to_upload_tracepoints
, t
);
697 INHERIT (to_upload_trace_state_variables
, t
);
698 INHERIT (to_get_raw_trace_data
, t
);
699 INHERIT (to_get_min_fast_tracepoint_insn_len
, t
);
700 INHERIT (to_set_disconnected_tracing
, t
);
701 INHERIT (to_set_circular_trace_buffer
, t
);
702 INHERIT (to_set_trace_buffer_size
, t
);
703 INHERIT (to_set_trace_notes
, t
);
704 INHERIT (to_get_tib_address
, t
);
705 INHERIT (to_set_permissions
, t
);
706 INHERIT (to_static_tracepoint_marker_at
, t
);
707 INHERIT (to_static_tracepoint_markers_by_strid
, t
);
708 INHERIT (to_traceframe_info
, t
);
709 INHERIT (to_use_agent
, t
);
710 INHERIT (to_can_use_agent
, t
);
711 INHERIT (to_augmented_libraries_svr4_read
, t
);
712 INHERIT (to_magic
, t
);
713 INHERIT (to_supports_evaluation_of_breakpoint_conditions
, t
);
714 INHERIT (to_can_run_breakpoint_commands
, t
);
715 /* Do not inherit to_memory_map. */
716 /* Do not inherit to_flash_erase. */
717 /* Do not inherit to_flash_done. */
721 /* Clean up a target struct so it no longer has any zero pointers in
722 it. Some entries are defaulted to a method that print an error,
723 others are hard-wired to a standard recursive default. */
725 #define de_fault(field, value) \
726 if (!current_target.field) \
727 current_target.field = value
730 (void (*) (char *, int))
733 (void (*) (struct target_ops
*))
735 de_fault (deprecated_xfer_memory
,
736 (int (*) (CORE_ADDR
, gdb_byte
*, int, int,
737 struct mem_attrib
*, struct target_ops
*))
739 de_fault (to_can_run
,
740 (int (*) (struct target_ops
*))
743 (void (*) (struct target_ops
*, ptid_t
))
745 de_fault (to_thread_architecture
,
746 default_thread_architecture
);
747 current_target
.to_read_description
= NULL
;
748 de_fault (to_get_ada_task_ptid
,
749 (ptid_t (*) (struct target_ops
*, long, long))
750 default_get_ada_task_ptid
);
751 de_fault (to_supports_multi_process
,
752 (int (*) (struct target_ops
*))
754 de_fault (to_supports_enable_disable_tracepoint
,
755 (int (*) (struct target_ops
*))
757 de_fault (to_supports_string_tracing
,
758 (int (*) (struct target_ops
*))
760 de_fault (to_trace_init
,
761 (void (*) (struct target_ops
*))
763 de_fault (to_download_tracepoint
,
764 (void (*) (struct target_ops
*, struct bp_location
*))
766 de_fault (to_can_download_tracepoint
,
767 (int (*) (struct target_ops
*))
769 de_fault (to_download_trace_state_variable
,
770 (void (*) (struct target_ops
*, struct trace_state_variable
*))
772 de_fault (to_enable_tracepoint
,
773 (void (*) (struct target_ops
*, struct bp_location
*))
775 de_fault (to_disable_tracepoint
,
776 (void (*) (struct target_ops
*, struct bp_location
*))
778 de_fault (to_trace_set_readonly_regions
,
779 (void (*) (struct target_ops
*))
781 de_fault (to_trace_start
,
782 (void (*) (struct target_ops
*))
784 de_fault (to_get_trace_status
,
785 (int (*) (struct target_ops
*, struct trace_status
*))
787 de_fault (to_get_tracepoint_status
,
788 (void (*) (struct target_ops
*, struct breakpoint
*,
789 struct uploaded_tp
*))
791 de_fault (to_trace_stop
,
792 (void (*) (struct target_ops
*))
794 de_fault (to_trace_find
,
795 (int (*) (struct target_ops
*,
796 enum trace_find_type
, int, CORE_ADDR
, CORE_ADDR
, int *))
798 de_fault (to_get_trace_state_variable_value
,
799 (int (*) (struct target_ops
*, int, LONGEST
*))
801 de_fault (to_save_trace_data
,
802 (int (*) (struct target_ops
*, const char *))
804 de_fault (to_upload_tracepoints
,
805 (int (*) (struct target_ops
*, struct uploaded_tp
**))
807 de_fault (to_upload_trace_state_variables
,
808 (int (*) (struct target_ops
*, struct uploaded_tsv
**))
810 de_fault (to_get_raw_trace_data
,
811 (LONGEST (*) (struct target_ops
*, gdb_byte
*, ULONGEST
, LONGEST
))
813 de_fault (to_get_min_fast_tracepoint_insn_len
,
814 (int (*) (struct target_ops
*))
816 de_fault (to_set_disconnected_tracing
,
817 (void (*) (struct target_ops
*, int))
819 de_fault (to_set_circular_trace_buffer
,
820 (void (*) (struct target_ops
*, int))
822 de_fault (to_set_trace_buffer_size
,
823 (void (*) (struct target_ops
*, LONGEST
))
825 de_fault (to_set_trace_notes
,
826 (int (*) (struct target_ops
*,
827 const char *, const char *, const char *))
829 de_fault (to_get_tib_address
,
830 (int (*) (struct target_ops
*, ptid_t
, CORE_ADDR
*))
832 de_fault (to_set_permissions
,
833 (void (*) (struct target_ops
*))
835 de_fault (to_static_tracepoint_marker_at
,
836 (int (*) (struct target_ops
*,
837 CORE_ADDR
, struct static_tracepoint_marker
*))
839 de_fault (to_static_tracepoint_markers_by_strid
,
840 (VEC(static_tracepoint_marker_p
) * (*) (struct target_ops
*,
843 de_fault (to_traceframe_info
,
844 (struct traceframe_info
* (*) (struct target_ops
*))
846 de_fault (to_supports_evaluation_of_breakpoint_conditions
,
847 (int (*) (struct target_ops
*))
849 de_fault (to_can_run_breakpoint_commands
,
850 (int (*) (struct target_ops
*))
852 de_fault (to_use_agent
,
853 (int (*) (struct target_ops
*, int))
855 de_fault (to_can_use_agent
,
856 (int (*) (struct target_ops
*))
858 de_fault (to_augmented_libraries_svr4_read
,
859 (int (*) (struct target_ops
*))
861 de_fault (to_execution_direction
, default_execution_direction
);
865 /* Finally, position the target-stack beneath the squashed
866 "current_target". That way code looking for a non-inherited
867 target method can quickly and simply find it. */
868 current_target
.beneath
= target_stack
;
871 setup_target_debug ();
874 /* Push a new target type into the stack of the existing target accessors,
875 possibly superseding some of the existing accessors.
877 Rather than allow an empty stack, we always have the dummy target at
878 the bottom stratum, so we can call the function vectors without
882 push_target (struct target_ops
*t
)
884 struct target_ops
**cur
;
886 /* Check magic number. If wrong, it probably means someone changed
887 the struct definition, but not all the places that initialize one. */
888 if (t
->to_magic
!= OPS_MAGIC
)
890 fprintf_unfiltered (gdb_stderr
,
891 "Magic number of %s target struct wrong\n",
893 internal_error (__FILE__
, __LINE__
,
894 _("failed internal consistency check"));
897 /* Find the proper stratum to install this target in. */
898 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
900 if ((int) (t
->to_stratum
) >= (int) (*cur
)->to_stratum
)
904 /* If there's already targets at this stratum, remove them. */
905 /* FIXME: cagney/2003-10-15: I think this should be popping all
906 targets to CUR, and not just those at this stratum level. */
907 while ((*cur
) != NULL
&& t
->to_stratum
== (*cur
)->to_stratum
)
909 /* There's already something at this stratum level. Close it,
910 and un-hook it from the stack. */
911 struct target_ops
*tmp
= (*cur
);
913 (*cur
) = (*cur
)->beneath
;
918 /* We have removed all targets in our stratum, now add the new one. */
922 update_current_target ();
925 /* Remove a target_ops vector from the stack, wherever it may be.
926 Return how many times it was removed (0 or 1). */
929 unpush_target (struct target_ops
*t
)
931 struct target_ops
**cur
;
932 struct target_ops
*tmp
;
934 if (t
->to_stratum
== dummy_stratum
)
935 internal_error (__FILE__
, __LINE__
,
936 _("Attempt to unpush the dummy target"));
938 /* Look for the specified target. Note that we assume that a target
939 can only occur once in the target stack. */
941 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
947 /* If we don't find target_ops, quit. Only open targets should be
952 /* Unchain the target. */
954 (*cur
) = (*cur
)->beneath
;
957 update_current_target ();
959 /* Finally close the target. Note we do this after unchaining, so
960 any target method calls from within the target_close
961 implementation don't end up in T anymore. */
968 pop_all_targets_above (enum strata above_stratum
)
970 while ((int) (current_target
.to_stratum
) > (int) above_stratum
)
972 if (!unpush_target (target_stack
))
974 fprintf_unfiltered (gdb_stderr
,
975 "pop_all_targets couldn't find target %s\n",
976 target_stack
->to_shortname
);
977 internal_error (__FILE__
, __LINE__
,
978 _("failed internal consistency check"));
985 pop_all_targets (void)
987 pop_all_targets_above (dummy_stratum
);
990 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
993 target_is_pushed (struct target_ops
*t
)
995 struct target_ops
**cur
;
997 /* Check magic number. If wrong, it probably means someone changed
998 the struct definition, but not all the places that initialize one. */
999 if (t
->to_magic
!= OPS_MAGIC
)
1001 fprintf_unfiltered (gdb_stderr
,
1002 "Magic number of %s target struct wrong\n",
1004 internal_error (__FILE__
, __LINE__
,
1005 _("failed internal consistency check"));
1008 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
1015 /* Using the objfile specified in OBJFILE, find the address for the
1016 current thread's thread-local storage with offset OFFSET. */
1018 target_translate_tls_address (struct objfile
*objfile
, CORE_ADDR offset
)
1020 volatile CORE_ADDR addr
= 0;
1021 struct target_ops
*target
;
1023 for (target
= current_target
.beneath
;
1025 target
= target
->beneath
)
1027 if (target
->to_get_thread_local_address
!= NULL
)
1032 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
1034 ptid_t ptid
= inferior_ptid
;
1035 volatile struct gdb_exception ex
;
1037 TRY_CATCH (ex
, RETURN_MASK_ALL
)
1041 /* Fetch the load module address for this objfile. */
1042 lm_addr
= gdbarch_fetch_tls_load_module_address (target_gdbarch (),
1044 /* If it's 0, throw the appropriate exception. */
1046 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR
,
1047 _("TLS load module not found"));
1049 addr
= target
->to_get_thread_local_address (target
, ptid
,
1052 /* If an error occurred, print TLS related messages here. Otherwise,
1053 throw the error to some higher catcher. */
1056 int objfile_is_library
= (objfile
->flags
& OBJF_SHARED
);
1060 case TLS_NO_LIBRARY_SUPPORT_ERROR
:
1061 error (_("Cannot find thread-local variables "
1062 "in this thread library."));
1064 case TLS_LOAD_MODULE_NOT_FOUND_ERROR
:
1065 if (objfile_is_library
)
1066 error (_("Cannot find shared library `%s' in dynamic"
1067 " linker's load module list"), objfile_name (objfile
));
1069 error (_("Cannot find executable file `%s' in dynamic"
1070 " linker's load module list"), objfile_name (objfile
));
1072 case TLS_NOT_ALLOCATED_YET_ERROR
:
1073 if (objfile_is_library
)
1074 error (_("The inferior has not yet allocated storage for"
1075 " thread-local variables in\n"
1076 "the shared library `%s'\n"
1078 objfile_name (objfile
), target_pid_to_str (ptid
));
1080 error (_("The inferior has not yet allocated storage for"
1081 " thread-local variables in\n"
1082 "the executable `%s'\n"
1084 objfile_name (objfile
), target_pid_to_str (ptid
));
1086 case TLS_GENERIC_ERROR
:
1087 if (objfile_is_library
)
1088 error (_("Cannot find thread-local storage for %s, "
1089 "shared library %s:\n%s"),
1090 target_pid_to_str (ptid
),
1091 objfile_name (objfile
), ex
.message
);
1093 error (_("Cannot find thread-local storage for %s, "
1094 "executable file %s:\n%s"),
1095 target_pid_to_str (ptid
),
1096 objfile_name (objfile
), ex
.message
);
1099 throw_exception (ex
);
1104 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1105 TLS is an ABI-specific thing. But we don't do that yet. */
1107 error (_("Cannot find thread-local variables on this target"));
1113 target_xfer_status_to_string (enum target_xfer_status err
)
1115 #define CASE(X) case X: return #X
1118 CASE(TARGET_XFER_E_IO
);
1119 CASE(TARGET_XFER_E_UNAVAILABLE
);
1128 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1130 /* target_read_string -- read a null terminated string, up to LEN bytes,
1131 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1132 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1133 is responsible for freeing it. Return the number of bytes successfully
1137 target_read_string (CORE_ADDR memaddr
, char **string
, int len
, int *errnop
)
1139 int tlen
, offset
, i
;
1143 int buffer_allocated
;
1145 unsigned int nbytes_read
= 0;
1147 gdb_assert (string
);
1149 /* Small for testing. */
1150 buffer_allocated
= 4;
1151 buffer
= xmalloc (buffer_allocated
);
1156 tlen
= MIN (len
, 4 - (memaddr
& 3));
1157 offset
= memaddr
& 3;
1159 errcode
= target_read_memory (memaddr
& ~3, buf
, sizeof buf
);
1162 /* The transfer request might have crossed the boundary to an
1163 unallocated region of memory. Retry the transfer, requesting
1167 errcode
= target_read_memory (memaddr
, buf
, 1);
1172 if (bufptr
- buffer
+ tlen
> buffer_allocated
)
1176 bytes
= bufptr
- buffer
;
1177 buffer_allocated
*= 2;
1178 buffer
= xrealloc (buffer
, buffer_allocated
);
1179 bufptr
= buffer
+ bytes
;
1182 for (i
= 0; i
< tlen
; i
++)
1184 *bufptr
++ = buf
[i
+ offset
];
1185 if (buf
[i
+ offset
] == '\000')
1187 nbytes_read
+= i
+ 1;
1194 nbytes_read
+= tlen
;
1203 struct target_section_table
*
1204 target_get_section_table (struct target_ops
*target
)
1206 struct target_ops
*t
;
1209 fprintf_unfiltered (gdb_stdlog
, "target_get_section_table ()\n");
1211 for (t
= target
; t
!= NULL
; t
= t
->beneath
)
1212 if (t
->to_get_section_table
!= NULL
)
1213 return (*t
->to_get_section_table
) (t
);
1218 /* Find a section containing ADDR. */
1220 struct target_section
*
1221 target_section_by_addr (struct target_ops
*target
, CORE_ADDR addr
)
1223 struct target_section_table
*table
= target_get_section_table (target
);
1224 struct target_section
*secp
;
1229 for (secp
= table
->sections
; secp
< table
->sections_end
; secp
++)
1231 if (addr
>= secp
->addr
&& addr
< secp
->endaddr
)
1237 /* Read memory from the live target, even if currently inspecting a
1238 traceframe. The return is the same as that of target_read. */
1240 static enum target_xfer_status
1241 target_read_live_memory (enum target_object object
,
1242 ULONGEST memaddr
, gdb_byte
*myaddr
, ULONGEST len
,
1243 ULONGEST
*xfered_len
)
1245 enum target_xfer_status ret
;
1246 struct cleanup
*cleanup
;
1248 /* Switch momentarily out of tfind mode so to access live memory.
1249 Note that this must not clear global state, such as the frame
1250 cache, which must still remain valid for the previous traceframe.
1251 We may be _building_ the frame cache at this point. */
1252 cleanup
= make_cleanup_restore_traceframe_number ();
1253 set_traceframe_number (-1);
1255 ret
= target_xfer_partial (current_target
.beneath
, object
, NULL
,
1256 myaddr
, NULL
, memaddr
, len
, xfered_len
);
1258 do_cleanups (cleanup
);
1262 /* Using the set of read-only target sections of OPS, read live
1263 read-only memory. Note that the actual reads start from the
1264 top-most target again.
1266 For interface/parameters/return description see target.h,
1269 static enum target_xfer_status
1270 memory_xfer_live_readonly_partial (struct target_ops
*ops
,
1271 enum target_object object
,
1272 gdb_byte
*readbuf
, ULONGEST memaddr
,
1273 ULONGEST len
, ULONGEST
*xfered_len
)
1275 struct target_section
*secp
;
1276 struct target_section_table
*table
;
1278 secp
= target_section_by_addr (ops
, memaddr
);
1280 && (bfd_get_section_flags (secp
->the_bfd_section
->owner
,
1281 secp
->the_bfd_section
)
1284 struct target_section
*p
;
1285 ULONGEST memend
= memaddr
+ len
;
1287 table
= target_get_section_table (ops
);
1289 for (p
= table
->sections
; p
< table
->sections_end
; p
++)
1291 if (memaddr
>= p
->addr
)
1293 if (memend
<= p
->endaddr
)
1295 /* Entire transfer is within this section. */
1296 return target_read_live_memory (object
, memaddr
,
1297 readbuf
, len
, xfered_len
);
1299 else if (memaddr
>= p
->endaddr
)
1301 /* This section ends before the transfer starts. */
1306 /* This section overlaps the transfer. Just do half. */
1307 len
= p
->endaddr
- memaddr
;
1308 return target_read_live_memory (object
, memaddr
,
1309 readbuf
, len
, xfered_len
);
1315 return TARGET_XFER_EOF
;
1318 /* Read memory from more than one valid target. A core file, for
1319 instance, could have some of memory but delegate other bits to
1320 the target below it. So, we must manually try all targets. */
1322 static enum target_xfer_status
1323 raw_memory_xfer_partial (struct target_ops
*ops
, gdb_byte
*readbuf
,
1324 const gdb_byte
*writebuf
, ULONGEST memaddr
, LONGEST len
,
1325 ULONGEST
*xfered_len
)
1327 enum target_xfer_status res
;
1331 res
= ops
->to_xfer_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
1332 readbuf
, writebuf
, memaddr
, len
,
1334 if (res
== TARGET_XFER_OK
)
1337 /* Stop if the target reports that the memory is not available. */
1338 if (res
== TARGET_XFER_E_UNAVAILABLE
)
1341 /* We want to continue past core files to executables, but not
1342 past a running target's memory. */
1343 if (ops
->to_has_all_memory (ops
))
1348 while (ops
!= NULL
);
1353 /* Perform a partial memory transfer.
1354 For docs see target.h, to_xfer_partial. */
1356 static enum target_xfer_status
1357 memory_xfer_partial_1 (struct target_ops
*ops
, enum target_object object
,
1358 gdb_byte
*readbuf
, const gdb_byte
*writebuf
, ULONGEST memaddr
,
1359 ULONGEST len
, ULONGEST
*xfered_len
)
1361 enum target_xfer_status res
;
1363 struct mem_region
*region
;
1364 struct inferior
*inf
;
1366 /* For accesses to unmapped overlay sections, read directly from
1367 files. Must do this first, as MEMADDR may need adjustment. */
1368 if (readbuf
!= NULL
&& overlay_debugging
)
1370 struct obj_section
*section
= find_pc_overlay (memaddr
);
1372 if (pc_in_unmapped_range (memaddr
, section
))
1374 struct target_section_table
*table
1375 = target_get_section_table (ops
);
1376 const char *section_name
= section
->the_bfd_section
->name
;
1378 memaddr
= overlay_mapped_address (memaddr
, section
);
1379 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1380 memaddr
, len
, xfered_len
,
1382 table
->sections_end
,
1387 /* Try the executable files, if "trust-readonly-sections" is set. */
1388 if (readbuf
!= NULL
&& trust_readonly
)
1390 struct target_section
*secp
;
1391 struct target_section_table
*table
;
1393 secp
= target_section_by_addr (ops
, memaddr
);
1395 && (bfd_get_section_flags (secp
->the_bfd_section
->owner
,
1396 secp
->the_bfd_section
)
1399 table
= target_get_section_table (ops
);
1400 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1401 memaddr
, len
, xfered_len
,
1403 table
->sections_end
,
1408 /* If reading unavailable memory in the context of traceframes, and
1409 this address falls within a read-only section, fallback to
1410 reading from live memory. */
1411 if (readbuf
!= NULL
&& get_traceframe_number () != -1)
1413 VEC(mem_range_s
) *available
;
1415 /* If we fail to get the set of available memory, then the
1416 target does not support querying traceframe info, and so we
1417 attempt reading from the traceframe anyway (assuming the
1418 target implements the old QTro packet then). */
1419 if (traceframe_available_memory (&available
, memaddr
, len
))
1421 struct cleanup
*old_chain
;
1423 old_chain
= make_cleanup (VEC_cleanup(mem_range_s
), &available
);
1425 if (VEC_empty (mem_range_s
, available
)
1426 || VEC_index (mem_range_s
, available
, 0)->start
!= memaddr
)
1428 /* Don't read into the traceframe's available
1430 if (!VEC_empty (mem_range_s
, available
))
1432 LONGEST oldlen
= len
;
1434 len
= VEC_index (mem_range_s
, available
, 0)->start
- memaddr
;
1435 gdb_assert (len
<= oldlen
);
1438 do_cleanups (old_chain
);
1440 /* This goes through the topmost target again. */
1441 res
= memory_xfer_live_readonly_partial (ops
, object
,
1444 if (res
== TARGET_XFER_OK
)
1445 return TARGET_XFER_OK
;
1448 /* No use trying further, we know some memory starting
1449 at MEMADDR isn't available. */
1451 return TARGET_XFER_E_UNAVAILABLE
;
1455 /* Don't try to read more than how much is available, in
1456 case the target implements the deprecated QTro packet to
1457 cater for older GDBs (the target's knowledge of read-only
1458 sections may be outdated by now). */
1459 len
= VEC_index (mem_range_s
, available
, 0)->length
;
1461 do_cleanups (old_chain
);
1465 /* Try GDB's internal data cache. */
1466 region
= lookup_mem_region (memaddr
);
1467 /* region->hi == 0 means there's no upper bound. */
1468 if (memaddr
+ len
< region
->hi
|| region
->hi
== 0)
1471 reg_len
= region
->hi
- memaddr
;
1473 switch (region
->attrib
.mode
)
1476 if (writebuf
!= NULL
)
1477 return TARGET_XFER_E_IO
;
1481 if (readbuf
!= NULL
)
1482 return TARGET_XFER_E_IO
;
1486 /* We only support writing to flash during "load" for now. */
1487 if (writebuf
!= NULL
)
1488 error (_("Writing to flash memory forbidden in this context"));
1492 return TARGET_XFER_E_IO
;
1495 if (!ptid_equal (inferior_ptid
, null_ptid
))
1496 inf
= find_inferior_pid (ptid_get_pid (inferior_ptid
));
1501 /* The dcache reads whole cache lines; that doesn't play well
1502 with reading from a trace buffer, because reading outside of
1503 the collected memory range fails. */
1504 && get_traceframe_number () == -1
1505 && (region
->attrib
.cache
1506 || (stack_cache_enabled_p () && object
== TARGET_OBJECT_STACK_MEMORY
)
1507 || (code_cache_enabled_p () && object
== TARGET_OBJECT_CODE_MEMORY
)))
1509 DCACHE
*dcache
= target_dcache_get_or_init ();
1512 if (readbuf
!= NULL
)
1513 l
= dcache_xfer_memory (ops
, dcache
, memaddr
, readbuf
, reg_len
, 0);
1515 /* FIXME drow/2006-08-09: If we're going to preserve const
1516 correctness dcache_xfer_memory should take readbuf and
1518 l
= dcache_xfer_memory (ops
, dcache
, memaddr
, (void *) writebuf
,
1521 return TARGET_XFER_E_IO
;
1524 *xfered_len
= (ULONGEST
) l
;
1525 return TARGET_XFER_OK
;
1529 /* If none of those methods found the memory we wanted, fall back
1530 to a target partial transfer. Normally a single call to
1531 to_xfer_partial is enough; if it doesn't recognize an object
1532 it will call the to_xfer_partial of the next target down.
1533 But for memory this won't do. Memory is the only target
1534 object which can be read from more than one valid target.
1535 A core file, for instance, could have some of memory but
1536 delegate other bits to the target below it. So, we must
1537 manually try all targets. */
1539 res
= raw_memory_xfer_partial (ops
, readbuf
, writebuf
, memaddr
, reg_len
,
1542 /* Make sure the cache gets updated no matter what - if we are writing
1543 to the stack. Even if this write is not tagged as such, we still need
1544 to update the cache. */
1546 if (res
== TARGET_XFER_OK
1549 && target_dcache_init_p ()
1550 && !region
->attrib
.cache
1551 && ((stack_cache_enabled_p () && object
!= TARGET_OBJECT_STACK_MEMORY
)
1552 || (code_cache_enabled_p () && object
!= TARGET_OBJECT_CODE_MEMORY
)))
1554 DCACHE
*dcache
= target_dcache_get ();
1556 dcache_update (dcache
, memaddr
, (void *) writebuf
, reg_len
);
1559 /* If we still haven't got anything, return the last error. We
1564 /* Perform a partial memory transfer. For docs see target.h,
1567 static enum target_xfer_status
1568 memory_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1569 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
1570 ULONGEST memaddr
, ULONGEST len
, ULONGEST
*xfered_len
)
1572 enum target_xfer_status res
;
1574 /* Zero length requests are ok and require no work. */
1576 return TARGET_XFER_EOF
;
1578 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1579 breakpoint insns, thus hiding out from higher layers whether
1580 there are software breakpoints inserted in the code stream. */
1581 if (readbuf
!= NULL
)
1583 res
= memory_xfer_partial_1 (ops
, object
, readbuf
, NULL
, memaddr
, len
,
1586 if (res
== TARGET_XFER_OK
&& !show_memory_breakpoints
)
1587 breakpoint_xfer_memory (readbuf
, NULL
, NULL
, memaddr
, res
);
1592 struct cleanup
*old_chain
;
1594 /* A large write request is likely to be partially satisfied
1595 by memory_xfer_partial_1. We will continually malloc
1596 and free a copy of the entire write request for breakpoint
1597 shadow handling even though we only end up writing a small
1598 subset of it. Cap writes to 4KB to mitigate this. */
1599 len
= min (4096, len
);
1601 buf
= xmalloc (len
);
1602 old_chain
= make_cleanup (xfree
, buf
);
1603 memcpy (buf
, writebuf
, len
);
1605 breakpoint_xfer_memory (NULL
, buf
, writebuf
, memaddr
, len
);
1606 res
= memory_xfer_partial_1 (ops
, object
, NULL
, buf
, memaddr
, len
,
1609 do_cleanups (old_chain
);
1616 restore_show_memory_breakpoints (void *arg
)
1618 show_memory_breakpoints
= (uintptr_t) arg
;
1622 make_show_memory_breakpoints_cleanup (int show
)
1624 int current
= show_memory_breakpoints
;
1626 show_memory_breakpoints
= show
;
1627 return make_cleanup (restore_show_memory_breakpoints
,
1628 (void *) (uintptr_t) current
);
1631 /* For docs see target.h, to_xfer_partial. */
1633 enum target_xfer_status
1634 target_xfer_partial (struct target_ops
*ops
,
1635 enum target_object object
, const char *annex
,
1636 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
1637 ULONGEST offset
, ULONGEST len
,
1638 ULONGEST
*xfered_len
)
1640 enum target_xfer_status retval
;
1642 gdb_assert (ops
->to_xfer_partial
!= NULL
);
1644 /* Transfer is done when LEN is zero. */
1646 return TARGET_XFER_EOF
;
1648 if (writebuf
&& !may_write_memory
)
1649 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1650 core_addr_to_string_nz (offset
), plongest (len
));
1654 /* If this is a memory transfer, let the memory-specific code
1655 have a look at it instead. Memory transfers are more
1657 if (object
== TARGET_OBJECT_MEMORY
|| object
== TARGET_OBJECT_STACK_MEMORY
1658 || object
== TARGET_OBJECT_CODE_MEMORY
)
1659 retval
= memory_xfer_partial (ops
, object
, readbuf
,
1660 writebuf
, offset
, len
, xfered_len
);
1661 else if (object
== TARGET_OBJECT_RAW_MEMORY
)
1663 /* Request the normal memory object from other layers. */
1664 retval
= raw_memory_xfer_partial (ops
, readbuf
, writebuf
, offset
, len
,
1668 retval
= ops
->to_xfer_partial (ops
, object
, annex
, readbuf
,
1669 writebuf
, offset
, len
, xfered_len
);
1673 const unsigned char *myaddr
= NULL
;
1675 fprintf_unfiltered (gdb_stdlog
,
1676 "%s:target_xfer_partial "
1677 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1680 (annex
? annex
: "(null)"),
1681 host_address_to_string (readbuf
),
1682 host_address_to_string (writebuf
),
1683 core_addr_to_string_nz (offset
),
1684 pulongest (len
), retval
,
1685 pulongest (*xfered_len
));
1691 if (retval
== TARGET_XFER_OK
&& myaddr
!= NULL
)
1695 fputs_unfiltered (", bytes =", gdb_stdlog
);
1696 for (i
= 0; i
< *xfered_len
; i
++)
1698 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
1700 if (targetdebug
< 2 && i
> 0)
1702 fprintf_unfiltered (gdb_stdlog
, " ...");
1705 fprintf_unfiltered (gdb_stdlog
, "\n");
1708 fprintf_unfiltered (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
1712 fputc_unfiltered ('\n', gdb_stdlog
);
1715 /* Check implementations of to_xfer_partial update *XFERED_LEN
1716 properly. Do assertion after printing debug messages, so that we
1717 can find more clues on assertion failure from debugging messages. */
1718 if (retval
== TARGET_XFER_OK
|| retval
== TARGET_XFER_E_UNAVAILABLE
)
1719 gdb_assert (*xfered_len
> 0);
1724 /* Read LEN bytes of target memory at address MEMADDR, placing the
1725 results in GDB's memory at MYADDR. Returns either 0 for success or
1726 TARGET_XFER_E_IO if any error occurs.
1728 If an error occurs, no guarantee is made about the contents of the data at
1729 MYADDR. In particular, the caller should not depend upon partial reads
1730 filling the buffer with good data. There is no way for the caller to know
1731 how much good data might have been transfered anyway. Callers that can
1732 deal with partial reads should call target_read (which will retry until
1733 it makes no progress, and then return how much was transferred). */
1736 target_read_memory (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1738 /* Dispatch to the topmost target, not the flattened current_target.
1739 Memory accesses check target->to_has_(all_)memory, and the
1740 flattened target doesn't inherit those. */
1741 if (target_read (current_target
.beneath
, TARGET_OBJECT_MEMORY
, NULL
,
1742 myaddr
, memaddr
, len
) == len
)
1745 return TARGET_XFER_E_IO
;
1748 /* Like target_read_memory, but specify explicitly that this is a read
1749 from the target's raw memory. That is, this read bypasses the
1750 dcache, breakpoint shadowing, etc. */
1753 target_read_raw_memory (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1755 /* See comment in target_read_memory about why the request starts at
1756 current_target.beneath. */
1757 if (target_read (current_target
.beneath
, TARGET_OBJECT_RAW_MEMORY
, NULL
,
1758 myaddr
, memaddr
, len
) == len
)
1761 return TARGET_XFER_E_IO
;
1764 /* Like target_read_memory, but specify explicitly that this is a read from
1765 the target's stack. This may trigger different cache behavior. */
1768 target_read_stack (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1770 /* See comment in target_read_memory about why the request starts at
1771 current_target.beneath. */
1772 if (target_read (current_target
.beneath
, TARGET_OBJECT_STACK_MEMORY
, NULL
,
1773 myaddr
, memaddr
, len
) == len
)
1776 return TARGET_XFER_E_IO
;
1779 /* Like target_read_memory, but specify explicitly that this is a read from
1780 the target's code. This may trigger different cache behavior. */
1783 target_read_code (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1785 /* See comment in target_read_memory about why the request starts at
1786 current_target.beneath. */
1787 if (target_read (current_target
.beneath
, TARGET_OBJECT_CODE_MEMORY
, NULL
,
1788 myaddr
, memaddr
, len
) == len
)
1791 return TARGET_XFER_E_IO
;
1794 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1795 Returns either 0 for success or TARGET_XFER_E_IO if any
1796 error occurs. If an error occurs, no guarantee is made about how
1797 much data got written. Callers that can deal with partial writes
1798 should call target_write. */
1801 target_write_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, ssize_t len
)
1803 /* See comment in target_read_memory about why the request starts at
1804 current_target.beneath. */
1805 if (target_write (current_target
.beneath
, TARGET_OBJECT_MEMORY
, NULL
,
1806 myaddr
, memaddr
, len
) == len
)
1809 return TARGET_XFER_E_IO
;
1812 /* Write LEN bytes from MYADDR to target raw memory at address
1813 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1814 if any error occurs. If an error occurs, no guarantee is made
1815 about how much data got written. Callers that can deal with
1816 partial writes should call target_write. */
1819 target_write_raw_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, ssize_t len
)
1821 /* See comment in target_read_memory about why the request starts at
1822 current_target.beneath. */
1823 if (target_write (current_target
.beneath
, TARGET_OBJECT_RAW_MEMORY
, NULL
,
1824 myaddr
, memaddr
, len
) == len
)
1827 return TARGET_XFER_E_IO
;
1830 /* Fetch the target's memory map. */
1833 target_memory_map (void)
1835 VEC(mem_region_s
) *result
;
1836 struct mem_region
*last_one
, *this_one
;
1838 struct target_ops
*t
;
1841 fprintf_unfiltered (gdb_stdlog
, "target_memory_map ()\n");
1843 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1844 if (t
->to_memory_map
!= NULL
)
1850 result
= t
->to_memory_map (t
);
1854 qsort (VEC_address (mem_region_s
, result
),
1855 VEC_length (mem_region_s
, result
),
1856 sizeof (struct mem_region
), mem_region_cmp
);
1858 /* Check that regions do not overlap. Simultaneously assign
1859 a numbering for the "mem" commands to use to refer to
1862 for (ix
= 0; VEC_iterate (mem_region_s
, result
, ix
, this_one
); ix
++)
1864 this_one
->number
= ix
;
1866 if (last_one
&& last_one
->hi
> this_one
->lo
)
1868 warning (_("Overlapping regions in memory map: ignoring"));
1869 VEC_free (mem_region_s
, result
);
1872 last_one
= this_one
;
1879 target_flash_erase (ULONGEST address
, LONGEST length
)
1881 struct target_ops
*t
;
1883 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1884 if (t
->to_flash_erase
!= NULL
)
1887 fprintf_unfiltered (gdb_stdlog
, "target_flash_erase (%s, %s)\n",
1888 hex_string (address
), phex (length
, 0));
1889 t
->to_flash_erase (t
, address
, length
);
1897 target_flash_done (void)
1899 struct target_ops
*t
;
1901 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1902 if (t
->to_flash_done
!= NULL
)
1905 fprintf_unfiltered (gdb_stdlog
, "target_flash_done\n");
1906 t
->to_flash_done (t
);
/* "show trust-readonly-sections" callback: display the current
   setting VALUE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1924 static enum target_xfer_status
1925 default_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1926 const char *annex
, gdb_byte
*readbuf
,
1927 const gdb_byte
*writebuf
, ULONGEST offset
, ULONGEST len
,
1928 ULONGEST
*xfered_len
)
1930 if (object
== TARGET_OBJECT_MEMORY
1931 && ops
->deprecated_xfer_memory
!= NULL
)
1932 /* If available, fall back to the target's
1933 "deprecated_xfer_memory" method. */
1938 if (writebuf
!= NULL
)
1940 void *buffer
= xmalloc (len
);
1941 struct cleanup
*cleanup
= make_cleanup (xfree
, buffer
);
1943 memcpy (buffer
, writebuf
, len
);
1944 xfered
= ops
->deprecated_xfer_memory (offset
, buffer
, len
,
1945 1/*write*/, NULL
, ops
);
1946 do_cleanups (cleanup
);
1948 if (readbuf
!= NULL
)
1949 xfered
= ops
->deprecated_xfer_memory (offset
, readbuf
, len
,
1950 0/*read*/, NULL
, ops
);
1953 *xfered_len
= (ULONGEST
) xfered
;
1954 return TARGET_XFER_E_IO
;
1956 else if (xfered
== 0 && errno
== 0)
1957 /* "deprecated_xfer_memory" uses 0, cross checked against
1958 ERRNO as one indication of an error. */
1959 return TARGET_XFER_EOF
;
1961 return TARGET_XFER_E_IO
;
1965 gdb_assert (ops
->beneath
!= NULL
);
1966 return ops
->beneath
->to_xfer_partial (ops
->beneath
, object
, annex
,
1967 readbuf
, writebuf
, offset
, len
,
1972 /* Target vector read/write partial wrapper functions. */
1974 static enum target_xfer_status
1975 target_read_partial (struct target_ops
*ops
,
1976 enum target_object object
,
1977 const char *annex
, gdb_byte
*buf
,
1978 ULONGEST offset
, ULONGEST len
,
1979 ULONGEST
*xfered_len
)
1981 return target_xfer_partial (ops
, object
, annex
, buf
, NULL
, offset
, len
,
1985 static enum target_xfer_status
1986 target_write_partial (struct target_ops
*ops
,
1987 enum target_object object
,
1988 const char *annex
, const gdb_byte
*buf
,
1989 ULONGEST offset
, LONGEST len
, ULONGEST
*xfered_len
)
1991 return target_xfer_partial (ops
, object
, annex
, NULL
, buf
, offset
, len
,
1995 /* Wrappers to perform the full transfer. */
1997 /* For docs on target_read see target.h. */
2000 target_read (struct target_ops
*ops
,
2001 enum target_object object
,
2002 const char *annex
, gdb_byte
*buf
,
2003 ULONGEST offset
, LONGEST len
)
2007 while (xfered
< len
)
2009 ULONGEST xfered_len
;
2010 enum target_xfer_status status
;
2012 status
= target_read_partial (ops
, object
, annex
,
2013 (gdb_byte
*) buf
+ xfered
,
2014 offset
+ xfered
, len
- xfered
,
2017 /* Call an observer, notifying them of the xfer progress? */
2018 if (status
== TARGET_XFER_EOF
)
2020 else if (status
== TARGET_XFER_OK
)
2022 xfered
+= xfered_len
;
2032 /* Assuming that the entire [begin, end) range of memory cannot be
2033 read, try to read whatever subrange is possible to read.
2035 The function returns, in RESULT, either zero or one memory block.
2036 If there's a readable subrange at the beginning, it is completely
2037 read and returned. Any further readable subrange will not be read.
2038 Otherwise, if there's a readable subrange at the end, it will be
2039 completely read and returned. Any readable subranges before it
2040 (obviously, not starting at the beginning), will be ignored. In
2041 other cases -- either no readable subrange, or readable subrange(s)
2042 that is neither at the beginning, or end, nothing is returned.
2044 The purpose of this function is to handle a read across a boundary
2045 of accessible memory in a case when memory map is not available.
2046 The above restrictions are fine for this case, but will give
2047 incorrect results if the memory is 'patchy'. However, supporting
2048 'patchy' memory would require trying to read every single byte,
2049 and it seems unacceptable solution. Explicit memory map is
2050 recommended for this case -- and target_read_memory_robust will
2051 take care of reading multiple ranges then. */
2054 read_whatever_is_readable (struct target_ops
*ops
,
2055 ULONGEST begin
, ULONGEST end
,
2056 VEC(memory_read_result_s
) **result
)
2058 gdb_byte
*buf
= xmalloc (end
- begin
);
2059 ULONGEST current_begin
= begin
;
2060 ULONGEST current_end
= end
;
2062 memory_read_result_s r
;
2063 ULONGEST xfered_len
;
2065 /* If we previously failed to read 1 byte, nothing can be done here. */
2066 if (end
- begin
<= 1)
2072 /* Check that either first or the last byte is readable, and give up
2073 if not. This heuristic is meant to permit reading accessible memory
2074 at the boundary of accessible region. */
2075 if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2076 buf
, begin
, 1, &xfered_len
) == TARGET_XFER_OK
)
2081 else if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2082 buf
+ (end
-begin
) - 1, end
- 1, 1,
2083 &xfered_len
) == TARGET_XFER_OK
)
2094 /* Loop invariant is that the [current_begin, current_end) was previously
2095 found to be not readable as a whole.
2097 Note loop condition -- if the range has 1 byte, we can't divide the range
2098 so there's no point trying further. */
2099 while (current_end
- current_begin
> 1)
2101 ULONGEST first_half_begin
, first_half_end
;
2102 ULONGEST second_half_begin
, second_half_end
;
2104 ULONGEST middle
= current_begin
+ (current_end
- current_begin
)/2;
2108 first_half_begin
= current_begin
;
2109 first_half_end
= middle
;
2110 second_half_begin
= middle
;
2111 second_half_end
= current_end
;
2115 first_half_begin
= middle
;
2116 first_half_end
= current_end
;
2117 second_half_begin
= current_begin
;
2118 second_half_end
= middle
;
2121 xfer
= target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2122 buf
+ (first_half_begin
- begin
),
2124 first_half_end
- first_half_begin
);
2126 if (xfer
== first_half_end
- first_half_begin
)
2128 /* This half reads up fine. So, the error must be in the
2130 current_begin
= second_half_begin
;
2131 current_end
= second_half_end
;
2135 /* This half is not readable. Because we've tried one byte, we
2136 know some part of this half if actually redable. Go to the next
2137 iteration to divide again and try to read.
2139 We don't handle the other half, because this function only tries
2140 to read a single readable subrange. */
2141 current_begin
= first_half_begin
;
2142 current_end
= first_half_end
;
2148 /* The [begin, current_begin) range has been read. */
2150 r
.end
= current_begin
;
2155 /* The [current_end, end) range has been read. */
2156 LONGEST rlen
= end
- current_end
;
2158 r
.data
= xmalloc (rlen
);
2159 memcpy (r
.data
, buf
+ current_end
- begin
, rlen
);
2160 r
.begin
= current_end
;
2164 VEC_safe_push(memory_read_result_s
, (*result
), &r
);
2168 free_memory_read_result_vector (void *x
)
2170 VEC(memory_read_result_s
) *v
= x
;
2171 memory_read_result_s
*current
;
2174 for (ix
= 0; VEC_iterate (memory_read_result_s
, v
, ix
, current
); ++ix
)
2176 xfree (current
->data
);
2178 VEC_free (memory_read_result_s
, v
);
2181 VEC(memory_read_result_s
) *
2182 read_memory_robust (struct target_ops
*ops
, ULONGEST offset
, LONGEST len
)
2184 VEC(memory_read_result_s
) *result
= 0;
2187 while (xfered
< len
)
2189 struct mem_region
*region
= lookup_mem_region (offset
+ xfered
);
2192 /* If there is no explicit region, a fake one should be created. */
2193 gdb_assert (region
);
2195 if (region
->hi
== 0)
2196 rlen
= len
- xfered
;
2198 rlen
= region
->hi
- offset
;
2200 if (region
->attrib
.mode
== MEM_NONE
|| region
->attrib
.mode
== MEM_WO
)
2202 /* Cannot read this region. Note that we can end up here only
2203 if the region is explicitly marked inaccessible, or
2204 'inaccessible-by-default' is in effect. */
2209 LONGEST to_read
= min (len
- xfered
, rlen
);
2210 gdb_byte
*buffer
= (gdb_byte
*)xmalloc (to_read
);
2212 LONGEST xfer
= target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2213 (gdb_byte
*) buffer
,
2214 offset
+ xfered
, to_read
);
2215 /* Call an observer, notifying them of the xfer progress? */
2218 /* Got an error reading full chunk. See if maybe we can read
2221 read_whatever_is_readable (ops
, offset
+ xfered
,
2222 offset
+ xfered
+ to_read
, &result
);
2227 struct memory_read_result r
;
2229 r
.begin
= offset
+ xfered
;
2230 r
.end
= r
.begin
+ xfer
;
2231 VEC_safe_push (memory_read_result_s
, result
, &r
);
2241 /* An alternative to target_write with progress callbacks. */
2244 target_write_with_progress (struct target_ops
*ops
,
2245 enum target_object object
,
2246 const char *annex
, const gdb_byte
*buf
,
2247 ULONGEST offset
, LONGEST len
,
2248 void (*progress
) (ULONGEST
, void *), void *baton
)
2252 /* Give the progress callback a chance to set up. */
2254 (*progress
) (0, baton
);
2256 while (xfered
< len
)
2258 ULONGEST xfered_len
;
2259 enum target_xfer_status status
;
2261 status
= target_write_partial (ops
, object
, annex
,
2262 (gdb_byte
*) buf
+ xfered
,
2263 offset
+ xfered
, len
- xfered
,
2266 if (status
== TARGET_XFER_EOF
)
2268 if (TARGET_XFER_STATUS_ERROR_P (status
))
2271 gdb_assert (status
== TARGET_XFER_OK
);
2273 (*progress
) (xfered_len
, baton
);
2275 xfered
+= xfered_len
;
2281 /* For docs on target_write see target.h. */
2284 target_write (struct target_ops
*ops
,
2285 enum target_object object
,
2286 const char *annex
, const gdb_byte
*buf
,
2287 ULONGEST offset
, LONGEST len
)
2289 return target_write_with_progress (ops
, object
, annex
, buf
, offset
, len
,
2293 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2294 the size of the transferred data. PADDING additional bytes are
2295 available in *BUF_P. This is a helper function for
2296 target_read_alloc; see the declaration of that function for more
2300 target_read_alloc_1 (struct target_ops
*ops
, enum target_object object
,
2301 const char *annex
, gdb_byte
**buf_p
, int padding
)
2303 size_t buf_alloc
, buf_pos
;
2306 /* This function does not have a length parameter; it reads the
2307 entire OBJECT). Also, it doesn't support objects fetched partly
2308 from one target and partly from another (in a different stratum,
2309 e.g. a core file and an executable). Both reasons make it
2310 unsuitable for reading memory. */
2311 gdb_assert (object
!= TARGET_OBJECT_MEMORY
);
2313 /* Start by reading up to 4K at a time. The target will throttle
2314 this number down if necessary. */
2316 buf
= xmalloc (buf_alloc
);
2320 ULONGEST xfered_len
;
2321 enum target_xfer_status status
;
2323 status
= target_read_partial (ops
, object
, annex
, &buf
[buf_pos
],
2324 buf_pos
, buf_alloc
- buf_pos
- padding
,
2327 if (status
== TARGET_XFER_EOF
)
2329 /* Read all there was. */
2336 else if (status
!= TARGET_XFER_OK
)
2338 /* An error occurred. */
2340 return TARGET_XFER_E_IO
;
2343 buf_pos
+= xfered_len
;
2345 /* If the buffer is filling up, expand it. */
2346 if (buf_alloc
< buf_pos
* 2)
2349 buf
= xrealloc (buf
, buf_alloc
);
2356 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2357 the size of the transferred data. See the declaration in "target.h"
2358 function for more information about the return value. */
2361 target_read_alloc (struct target_ops
*ops
, enum target_object object
,
2362 const char *annex
, gdb_byte
**buf_p
)
2364 return target_read_alloc_1 (ops
, object
, annex
, buf_p
, 0);
2367 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2368 returned as a string, allocated using xmalloc. If an error occurs
2369 or the transfer is unsupported, NULL is returned. Empty objects
2370 are returned as allocated but empty strings. A warning is issued
2371 if the result contains any embedded NUL bytes. */
2374 target_read_stralloc (struct target_ops
*ops
, enum target_object object
,
2379 LONGEST i
, transferred
;
2381 transferred
= target_read_alloc_1 (ops
, object
, annex
, &buffer
, 1);
2382 bufstr
= (char *) buffer
;
2384 if (transferred
< 0)
2387 if (transferred
== 0)
2388 return xstrdup ("");
2390 bufstr
[transferred
] = 0;
2392 /* Check for embedded NUL bytes; but allow trailing NULs. */
2393 for (i
= strlen (bufstr
); i
< transferred
; i
++)
2396 warning (_("target object %d, annex %s, "
2397 "contained unexpected null characters"),
2398 (int) object
, annex
? annex
: "(none)");
2405 /* Memory transfer methods. */
2408 get_target_memory (struct target_ops
*ops
, CORE_ADDR addr
, gdb_byte
*buf
,
2411 /* This method is used to read from an alternate, non-current
2412 target. This read must bypass the overlay support (as symbols
2413 don't match this target), and GDB's internal cache (wrong cache
2414 for this target). */
2415 if (target_read (ops
, TARGET_OBJECT_RAW_MEMORY
, NULL
, buf
, addr
, len
)
2417 memory_error (TARGET_XFER_E_IO
, addr
);
2421 get_target_memory_unsigned (struct target_ops
*ops
, CORE_ADDR addr
,
2422 int len
, enum bfd_endian byte_order
)
2424 gdb_byte buf
[sizeof (ULONGEST
)];
2426 gdb_assert (len
<= sizeof (buf
));
2427 get_target_memory (ops
, addr
, buf
, len
);
2428 return extract_unsigned_integer (buf
, len
, byte_order
);
2434 target_insert_breakpoint (struct gdbarch
*gdbarch
,
2435 struct bp_target_info
*bp_tgt
)
2437 if (!may_insert_breakpoints
)
2439 warning (_("May not insert breakpoints"));
2443 return current_target
.to_insert_breakpoint (¤t_target
,
2450 target_remove_breakpoint (struct gdbarch
*gdbarch
,
2451 struct bp_target_info
*bp_tgt
)
2453 /* This is kind of a weird case to handle, but the permission might
2454 have been changed after breakpoints were inserted - in which case
2455 we should just take the user literally and assume that any
2456 breakpoints should be left in place. */
2457 if (!may_insert_breakpoints
)
2459 warning (_("May not remove breakpoints"));
2463 return current_target
.to_remove_breakpoint (¤t_target
,
2468 target_info (char *args
, int from_tty
)
2470 struct target_ops
*t
;
2471 int has_all_mem
= 0;
2473 if (symfile_objfile
!= NULL
)
2474 printf_unfiltered (_("Symbols from \"%s\".\n"),
2475 objfile_name (symfile_objfile
));
2477 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
2479 if (!(*t
->to_has_memory
) (t
))
2482 if ((int) (t
->to_stratum
) <= (int) dummy_stratum
)
2485 printf_unfiltered (_("\tWhile running this, "
2486 "GDB does not access memory from...\n"));
2487 printf_unfiltered ("%s:\n", t
->to_longname
);
2488 (t
->to_files_info
) (t
);
2489 has_all_mem
= (*t
->to_has_all_memory
) (t
);
2493 /* This function is called before any new inferior is created, e.g.
2494 by running a program, attaching, or connecting to a target.
2495 It cleans up any state from previous invocations which might
2496 change between runs. This is a subset of what target_preopen
2497 resets (things which might change between targets). */
2500 target_pre_inferior (int from_tty
)
2502 /* Clear out solib state. Otherwise the solib state of the previous
2503 inferior might have survived and is entirely wrong for the new
2504 target. This has been observed on GNU/Linux using glibc 2.3. How
2516 Cannot access memory at address 0xdeadbeef
2519 /* In some OSs, the shared library list is the same/global/shared
2520 across inferiors. If code is shared between processes, so are
2521 memory regions and features. */
2522 if (!gdbarch_has_global_solist (target_gdbarch ()))
2524 no_shared_libraries (NULL
, from_tty
);
2526 invalidate_target_mem_regions ();
2528 target_clear_description ();
2531 agent_capability_invalidate ();
2534 /* Callback for iterate_over_inferiors. Gets rid of the given
2538 dispose_inferior (struct inferior
*inf
, void *args
)
2540 struct thread_info
*thread
;
2542 thread
= any_thread_of_process (inf
->pid
);
2545 switch_to_thread (thread
->ptid
);
2547 /* Core inferiors actually should be detached, not killed. */
2548 if (target_has_execution
)
2551 target_detach (NULL
, 0);
2557 /* This is to be called by the open routine before it does
2561 target_preopen (int from_tty
)
2565 if (have_inferiors ())
2568 || !have_live_inferiors ()
2569 || query (_("A program is being debugged already. Kill it? ")))
2570 iterate_over_inferiors (dispose_inferior
, NULL
);
2572 error (_("Program not killed."));
2575 /* Calling target_kill may remove the target from the stack. But if
2576 it doesn't (which seems like a win for UDI), remove it now. */
2577 /* Leave the exec target, though. The user may be switching from a
2578 live process to a core of the same program. */
2579 pop_all_targets_above (file_stratum
);
2581 target_pre_inferior (from_tty
);
2584 /* Detach a target after doing deferred register stores. */
2587 target_detach (const char *args
, int from_tty
)
2589 struct target_ops
* t
;
2591 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2592 /* Don't remove global breakpoints here. They're removed on
2593 disconnection from the target. */
2596 /* If we're in breakpoints-always-inserted mode, have to remove
2597 them before detaching. */
2598 remove_breakpoints_pid (ptid_get_pid (inferior_ptid
));
2600 prepare_for_detach ();
2602 current_target
.to_detach (¤t_target
, args
, from_tty
);
2604 fprintf_unfiltered (gdb_stdlog
, "target_detach (%s, %d)\n",
2609 target_disconnect (char *args
, int from_tty
)
2611 struct target_ops
*t
;
2613 /* If we're in breakpoints-always-inserted mode or if breakpoints
2614 are global across processes, we have to remove them before
2616 remove_breakpoints ();
2618 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2619 if (t
->to_disconnect
!= NULL
)
2622 fprintf_unfiltered (gdb_stdlog
, "target_disconnect (%s, %d)\n",
2624 t
->to_disconnect (t
, args
, from_tty
);
2632 target_wait (ptid_t ptid
, struct target_waitstatus
*status
, int options
)
2634 struct target_ops
*t
;
2635 ptid_t retval
= (current_target
.to_wait
) (¤t_target
, ptid
,
2640 char *status_string
;
2641 char *options_string
;
2643 status_string
= target_waitstatus_to_string (status
);
2644 options_string
= target_options_to_string (options
);
2645 fprintf_unfiltered (gdb_stdlog
,
2646 "target_wait (%d, status, options={%s})"
2648 ptid_get_pid (ptid
), options_string
,
2649 ptid_get_pid (retval
), status_string
);
2650 xfree (status_string
);
2651 xfree (options_string
);
2658 target_pid_to_str (ptid_t ptid
)
2660 struct target_ops
*t
;
2662 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2664 if (t
->to_pid_to_str
!= NULL
)
2665 return (*t
->to_pid_to_str
) (t
, ptid
);
2668 return normal_pid_to_str (ptid
);
2672 target_thread_name (struct thread_info
*info
)
2674 return current_target
.to_thread_name (¤t_target
, info
);
2678 target_resume (ptid_t ptid
, int step
, enum gdb_signal signal
)
2680 struct target_ops
*t
;
2682 target_dcache_invalidate ();
2684 current_target
.to_resume (¤t_target
, ptid
, step
, signal
);
2686 fprintf_unfiltered (gdb_stdlog
, "target_resume (%d, %s, %s)\n",
2687 ptid_get_pid (ptid
),
2688 step
? "step" : "continue",
2689 gdb_signal_to_name (signal
));
2691 registers_changed_ptid (ptid
);
2692 set_executing (ptid
, 1);
2693 set_running (ptid
, 1);
2694 clear_inline_frame_state (ptid
);
2698 target_pass_signals (int numsigs
, unsigned char *pass_signals
)
2700 struct target_ops
*t
;
2702 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2704 if (t
->to_pass_signals
!= NULL
)
2710 fprintf_unfiltered (gdb_stdlog
, "target_pass_signals (%d, {",
2713 for (i
= 0; i
< numsigs
; i
++)
2714 if (pass_signals
[i
])
2715 fprintf_unfiltered (gdb_stdlog
, " %s",
2716 gdb_signal_to_name (i
));
2718 fprintf_unfiltered (gdb_stdlog
, " })\n");
2721 (*t
->to_pass_signals
) (t
, numsigs
, pass_signals
);
2728 target_program_signals (int numsigs
, unsigned char *program_signals
)
2730 struct target_ops
*t
;
2732 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2734 if (t
->to_program_signals
!= NULL
)
2740 fprintf_unfiltered (gdb_stdlog
, "target_program_signals (%d, {",
2743 for (i
= 0; i
< numsigs
; i
++)
2744 if (program_signals
[i
])
2745 fprintf_unfiltered (gdb_stdlog
, " %s",
2746 gdb_signal_to_name (i
));
2748 fprintf_unfiltered (gdb_stdlog
, " })\n");
2751 (*t
->to_program_signals
) (t
, numsigs
, program_signals
);
2757 /* Look through the list of possible targets for a target that can
2761 target_follow_fork (int follow_child
, int detach_fork
)
2763 struct target_ops
*t
;
2765 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2767 if (t
->to_follow_fork
!= NULL
)
2769 int retval
= t
->to_follow_fork (t
, follow_child
, detach_fork
);
2772 fprintf_unfiltered (gdb_stdlog
,
2773 "target_follow_fork (%d, %d) = %d\n",
2774 follow_child
, detach_fork
, retval
);
2779 /* Some target returned a fork event, but did not know how to follow it. */
2780 internal_error (__FILE__
, __LINE__
,
2781 _("could not find a target to follow fork"));
2785 target_mourn_inferior (void)
2787 struct target_ops
*t
;
2789 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2791 if (t
->to_mourn_inferior
!= NULL
)
2793 t
->to_mourn_inferior (t
);
2795 fprintf_unfiltered (gdb_stdlog
, "target_mourn_inferior ()\n");
2797 /* We no longer need to keep handles on any of the object files.
2798 Make sure to release them to avoid unnecessarily locking any
2799 of them while we're not actually debugging. */
2800 bfd_cache_close_all ();
2806 internal_error (__FILE__
, __LINE__
,
2807 _("could not find a target to follow mourn inferior"));
2810 /* Look for a target which can describe architectural features, starting
2811 from TARGET. If we find one, return its description. */
2813 const struct target_desc
*
2814 target_read_description (struct target_ops
*target
)
2816 struct target_ops
*t
;
2818 for (t
= target
; t
!= NULL
; t
= t
->beneath
)
2819 if (t
->to_read_description
!= NULL
)
2821 const struct target_desc
*tdesc
;
2823 tdesc
= t
->to_read_description (t
);
2831 /* The default implementation of to_search_memory.
2832 This implements a basic search of memory, reading target memory and
2833 performing the search here (as opposed to performing the search in on the
2834 target side with, for example, gdbserver). */
2837 simple_search_memory (struct target_ops
*ops
,
2838 CORE_ADDR start_addr
, ULONGEST search_space_len
,
2839 const gdb_byte
*pattern
, ULONGEST pattern_len
,
2840 CORE_ADDR
*found_addrp
)
2842 /* NOTE: also defined in find.c testcase. */
2843 #define SEARCH_CHUNK_SIZE 16000
2844 const unsigned chunk_size
= SEARCH_CHUNK_SIZE
;
2845 /* Buffer to hold memory contents for searching. */
2846 gdb_byte
*search_buf
;
2847 unsigned search_buf_size
;
2848 struct cleanup
*old_cleanups
;
2850 search_buf_size
= chunk_size
+ pattern_len
- 1;
2852 /* No point in trying to allocate a buffer larger than the search space. */
2853 if (search_space_len
< search_buf_size
)
2854 search_buf_size
= search_space_len
;
2856 search_buf
= malloc (search_buf_size
);
2857 if (search_buf
== NULL
)
2858 error (_("Unable to allocate memory to perform the search."));
2859 old_cleanups
= make_cleanup (free_current_contents
, &search_buf
);
2861 /* Prime the search buffer. */
2863 if (target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2864 search_buf
, start_addr
, search_buf_size
) != search_buf_size
)
2866 warning (_("Unable to access %s bytes of target "
2867 "memory at %s, halting search."),
2868 pulongest (search_buf_size
), hex_string (start_addr
));
2869 do_cleanups (old_cleanups
);
2873 /* Perform the search.
2875 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2876 When we've scanned N bytes we copy the trailing bytes to the start and
2877 read in another N bytes. */
2879 while (search_space_len
>= pattern_len
)
2881 gdb_byte
*found_ptr
;
2882 unsigned nr_search_bytes
= min (search_space_len
, search_buf_size
);
2884 found_ptr
= memmem (search_buf
, nr_search_bytes
,
2885 pattern
, pattern_len
);
2887 if (found_ptr
!= NULL
)
2889 CORE_ADDR found_addr
= start_addr
+ (found_ptr
- search_buf
);
2891 *found_addrp
= found_addr
;
2892 do_cleanups (old_cleanups
);
2896 /* Not found in this chunk, skip to next chunk. */
2898 /* Don't let search_space_len wrap here, it's unsigned. */
2899 if (search_space_len
>= chunk_size
)
2900 search_space_len
-= chunk_size
;
2902 search_space_len
= 0;
2904 if (search_space_len
>= pattern_len
)
2906 unsigned keep_len
= search_buf_size
- chunk_size
;
2907 CORE_ADDR read_addr
= start_addr
+ chunk_size
+ keep_len
;
2910 /* Copy the trailing part of the previous iteration to the front
2911 of the buffer for the next iteration. */
2912 gdb_assert (keep_len
== pattern_len
- 1);
2913 memcpy (search_buf
, search_buf
+ chunk_size
, keep_len
);
2915 nr_to_read
= min (search_space_len
- keep_len
, chunk_size
);
2917 if (target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2918 search_buf
+ keep_len
, read_addr
,
2919 nr_to_read
) != nr_to_read
)
2921 warning (_("Unable to access %s bytes of target "
2922 "memory at %s, halting search."),
2923 plongest (nr_to_read
),
2924 hex_string (read_addr
));
2925 do_cleanups (old_cleanups
);
2929 start_addr
+= chunk_size
;
2935 do_cleanups (old_cleanups
);
2939 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2940 sequence of bytes in PATTERN with length PATTERN_LEN.
2942 The result is 1 if found, 0 if not found, and -1 if there was an error
2943 requiring halting of the search (e.g. memory read error).
2944 If the pattern is found the address is recorded in FOUND_ADDRP. */
2947 target_search_memory (CORE_ADDR start_addr
, ULONGEST search_space_len
,
2948 const gdb_byte
*pattern
, ULONGEST pattern_len
,
2949 CORE_ADDR
*found_addrp
)
2951 struct target_ops
*t
;
2954 /* We don't use INHERIT to set current_target.to_search_memory,
2955 so we have to scan the target stack and handle targetdebug
2959 fprintf_unfiltered (gdb_stdlog
, "target_search_memory (%s, ...)\n",
2960 hex_string (start_addr
));
2962 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2963 if (t
->to_search_memory
!= NULL
)
2968 found
= t
->to_search_memory (t
, start_addr
, search_space_len
,
2969 pattern
, pattern_len
, found_addrp
);
2973 /* If a special version of to_search_memory isn't available, use the
2975 found
= simple_search_memory (current_target
.beneath
,
2976 start_addr
, search_space_len
,
2977 pattern
, pattern_len
, found_addrp
);
2981 fprintf_unfiltered (gdb_stdlog
, " = %d\n", found
);
2986 /* Look through the currently pushed targets. If none of them will
2987 be able to restart the currently running process, issue an error
2991 target_require_runnable (void)
2993 struct target_ops
*t
;
2995 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
2997 /* If this target knows how to create a new program, then
2998 assume we will still be able to after killing the current
2999 one. Either killing and mourning will not pop T, or else
3000 find_default_run_target will find it again. */
3001 if (t
->to_create_inferior
!= NULL
)
3004 /* Do not worry about thread_stratum targets that can not
3005 create inferiors. Assume they will be pushed again if
3006 necessary, and continue to the process_stratum. */
3007 if (t
->to_stratum
== thread_stratum
3008 || t
->to_stratum
== arch_stratum
)
3011 error (_("The \"%s\" target does not support \"run\". "
3012 "Try \"help target\" or \"continue\"."),
3016 /* This function is only called if the target is running. In that
3017 case there should have been a process_stratum target and it
3018 should either know how to create inferiors, or not... */
3019 internal_error (__FILE__
, __LINE__
, _("No targets found"));
3022 /* Look through the list of possible targets for a target that can
3023 execute a run or attach command without any other data. This is
3024 used to locate the default process stratum.
3026 If DO_MESG is not NULL, the result is always valid (error() is
3027 called for errors); else, return NULL on error. */
3029 static struct target_ops
*
3030 find_default_run_target (char *do_mesg
)
3032 struct target_ops
**t
;
3033 struct target_ops
*runable
= NULL
;
3038 for (t
= target_structs
; t
< target_structs
+ target_struct_size
;
3041 if ((*t
)->to_can_run
&& target_can_run (*t
))
3051 error (_("Don't know how to %s. Try \"help target\"."), do_mesg
);
3060 find_default_attach (struct target_ops
*ops
, char *args
, int from_tty
)
3062 struct target_ops
*t
;
3064 t
= find_default_run_target ("attach");
3065 (t
->to_attach
) (t
, args
, from_tty
);
3070 find_default_create_inferior (struct target_ops
*ops
,
3071 char *exec_file
, char *allargs
, char **env
,
3074 struct target_ops
*t
;
3076 t
= find_default_run_target ("run");
3077 (t
->to_create_inferior
) (t
, exec_file
, allargs
, env
, from_tty
);
3082 find_default_can_async_p (struct target_ops
*ignore
)
3084 struct target_ops
*t
;
3086 /* This may be called before the target is pushed on the stack;
3087 look for the default process stratum. If there's none, gdb isn't
3088 configured with a native debugger, and target remote isn't
3090 t
= find_default_run_target (NULL
);
3091 if (t
&& t
->to_can_async_p
!= delegate_can_async_p
)
3092 return (t
->to_can_async_p
) (t
);
3097 find_default_is_async_p (struct target_ops
*ignore
)
3099 struct target_ops
*t
;
3101 /* This may be called before the target is pushed on the stack;
3102 look for the default process stratum. If there's none, gdb isn't
3103 configured with a native debugger, and target remote isn't
3105 t
= find_default_run_target (NULL
);
3106 if (t
&& t
->to_is_async_p
!= delegate_is_async_p
)
3107 return (t
->to_is_async_p
) (t
);
3112 find_default_supports_non_stop (struct target_ops
*self
)
3114 struct target_ops
*t
;
3116 t
= find_default_run_target (NULL
);
3117 if (t
&& t
->to_supports_non_stop
)
3118 return (t
->to_supports_non_stop
) (t
);
3123 target_supports_non_stop (void)
3125 struct target_ops
*t
;
3127 for (t
= ¤t_target
; t
!= NULL
; t
= t
->beneath
)
3128 if (t
->to_supports_non_stop
)
3129 return t
->to_supports_non_stop (t
);
3134 /* Implement the "info proc" command. */
3137 target_info_proc (char *args
, enum info_proc_what what
)
3139 struct target_ops
*t
;
3141 /* If we're already connected to something that can get us OS
3142 related data, use it. Otherwise, try using the native
3144 if (current_target
.to_stratum
>= process_stratum
)
3145 t
= current_target
.beneath
;
3147 t
= find_default_run_target (NULL
);
3149 for (; t
!= NULL
; t
= t
->beneath
)
3151 if (t
->to_info_proc
!= NULL
)
3153 t
->to_info_proc (t
, args
, what
);
3156 fprintf_unfiltered (gdb_stdlog
,
3157 "target_info_proc (\"%s\", %d)\n", args
, what
);
3167 find_default_supports_disable_randomization (struct target_ops
*self
)
3169 struct target_ops
*t
;
3171 t
= find_default_run_target (NULL
);
3172 if (t
&& t
->to_supports_disable_randomization
)
3173 return (t
->to_supports_disable_randomization
) (t
);
3178 target_supports_disable_randomization (void)
3180 struct target_ops
*t
;
3182 for (t
= ¤t_target
; t
!= NULL
; t
= t
->beneath
)
3183 if (t
->to_supports_disable_randomization
)
3184 return t
->to_supports_disable_randomization (t
);
3190 target_get_osdata (const char *type
)
3192 struct target_ops
*t
;
3194 /* If we're already connected to something that can get us OS
3195 related data, use it. Otherwise, try using the native
3197 if (current_target
.to_stratum
>= process_stratum
)
3198 t
= current_target
.beneath
;
3200 t
= find_default_run_target ("get OS data");
3205 return target_read_stralloc (t
, TARGET_OBJECT_OSDATA
, type
);
3208 /* Determine the current address space of thread PTID. */
3210 struct address_space
*
3211 target_thread_address_space (ptid_t ptid
)
3213 struct address_space
*aspace
;
3214 struct inferior
*inf
;
3215 struct target_ops
*t
;
3217 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3219 if (t
->to_thread_address_space
!= NULL
)
3221 aspace
= t
->to_thread_address_space (t
, ptid
);
3222 gdb_assert (aspace
);
3225 fprintf_unfiltered (gdb_stdlog
,
3226 "target_thread_address_space (%s) = %d\n",
3227 target_pid_to_str (ptid
),
3228 address_space_num (aspace
));
3233 /* Fall-back to the "main" address space of the inferior. */
3234 inf
= find_inferior_pid (ptid_get_pid (ptid
));
3236 if (inf
== NULL
|| inf
->aspace
== NULL
)
3237 internal_error (__FILE__
, __LINE__
,
3238 _("Can't determine the current "
3239 "address space of thread %s\n"),
3240 target_pid_to_str (ptid
));
3246 /* Target file operations. */
3248 static struct target_ops
*
3249 default_fileio_target (void)
3251 /* If we're already connected to something that can perform
3252 file I/O, use it. Otherwise, try using the native target. */
3253 if (current_target
.to_stratum
>= process_stratum
)
3254 return current_target
.beneath
;
3256 return find_default_run_target ("file I/O");
3259 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3260 target file descriptor, or -1 if an error occurs (and set
3263 target_fileio_open (const char *filename
, int flags
, int mode
,
3266 struct target_ops
*t
;
3268 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3270 if (t
->to_fileio_open
!= NULL
)
3272 int fd
= t
->to_fileio_open (t
, filename
, flags
, mode
, target_errno
);
3275 fprintf_unfiltered (gdb_stdlog
,
3276 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3277 filename
, flags
, mode
,
3278 fd
, fd
!= -1 ? 0 : *target_errno
);
3283 *target_errno
= FILEIO_ENOSYS
;
3287 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3288 Return the number of bytes written, or -1 if an error occurs
3289 (and set *TARGET_ERRNO). */
3291 target_fileio_pwrite (int fd
, const gdb_byte
*write_buf
, int len
,
3292 ULONGEST offset
, int *target_errno
)
3294 struct target_ops
*t
;
3296 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3298 if (t
->to_fileio_pwrite
!= NULL
)
3300 int ret
= t
->to_fileio_pwrite (t
, fd
, write_buf
, len
, offset
,
3304 fprintf_unfiltered (gdb_stdlog
,
3305 "target_fileio_pwrite (%d,...,%d,%s) "
3307 fd
, len
, pulongest (offset
),
3308 ret
, ret
!= -1 ? 0 : *target_errno
);
3313 *target_errno
= FILEIO_ENOSYS
;
3317 /* Read up to LEN bytes FD on the target into READ_BUF.
3318 Return the number of bytes read, or -1 if an error occurs
3319 (and set *TARGET_ERRNO). */
3321 target_fileio_pread (int fd
, gdb_byte
*read_buf
, int len
,
3322 ULONGEST offset
, int *target_errno
)
3324 struct target_ops
*t
;
3326 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3328 if (t
->to_fileio_pread
!= NULL
)
3330 int ret
= t
->to_fileio_pread (t
, fd
, read_buf
, len
, offset
,
3334 fprintf_unfiltered (gdb_stdlog
,
3335 "target_fileio_pread (%d,...,%d,%s) "
3337 fd
, len
, pulongest (offset
),
3338 ret
, ret
!= -1 ? 0 : *target_errno
);
3343 *target_errno
= FILEIO_ENOSYS
;
3347 /* Close FD on the target. Return 0, or -1 if an error occurs
3348 (and set *TARGET_ERRNO). */
3350 target_fileio_close (int fd
, int *target_errno
)
3352 struct target_ops
*t
;
3354 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3356 if (t
->to_fileio_close
!= NULL
)
3358 int ret
= t
->to_fileio_close (t
, fd
, target_errno
);
3361 fprintf_unfiltered (gdb_stdlog
,
3362 "target_fileio_close (%d) = %d (%d)\n",
3363 fd
, ret
, ret
!= -1 ? 0 : *target_errno
);
3368 *target_errno
= FILEIO_ENOSYS
;
3372 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3373 occurs (and set *TARGET_ERRNO). */
3375 target_fileio_unlink (const char *filename
, int *target_errno
)
3377 struct target_ops
*t
;
3379 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3381 if (t
->to_fileio_unlink
!= NULL
)
3383 int ret
= t
->to_fileio_unlink (t
, filename
, target_errno
);
3386 fprintf_unfiltered (gdb_stdlog
,
3387 "target_fileio_unlink (%s) = %d (%d)\n",
3388 filename
, ret
, ret
!= -1 ? 0 : *target_errno
);
3393 *target_errno
= FILEIO_ENOSYS
;
3397 /* Read value of symbolic link FILENAME on the target. Return a
3398 null-terminated string allocated via xmalloc, or NULL if an error
3399 occurs (and set *TARGET_ERRNO). */
3401 target_fileio_readlink (const char *filename
, int *target_errno
)
3403 struct target_ops
*t
;
3405 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3407 if (t
->to_fileio_readlink
!= NULL
)
3409 char *ret
= t
->to_fileio_readlink (t
, filename
, target_errno
);
3412 fprintf_unfiltered (gdb_stdlog
,
3413 "target_fileio_readlink (%s) = %s (%d)\n",
3414 filename
, ret
? ret
: "(nil)",
3415 ret
? 0 : *target_errno
);
3420 *target_errno
= FILEIO_ENOSYS
;
/* Cleanup callback that closes the target file descriptor pointed to
   by OPAQUE, discarding any close error.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int fd = *(int *) opaque;
  int target_errno;

  target_fileio_close (fd, &target_errno);
}
3433 /* Read target file FILENAME. Store the result in *BUF_P and
3434 return the size of the transferred data. PADDING additional bytes are
3435 available in *BUF_P. This is a helper function for
3436 target_fileio_read_alloc; see the declaration of that function for more
3440 target_fileio_read_alloc_1 (const char *filename
,
3441 gdb_byte
**buf_p
, int padding
)
3443 struct cleanup
*close_cleanup
;
3444 size_t buf_alloc
, buf_pos
;
3450 fd
= target_fileio_open (filename
, FILEIO_O_RDONLY
, 0700, &target_errno
);
3454 close_cleanup
= make_cleanup (target_fileio_close_cleanup
, &fd
);
3456 /* Start by reading up to 4K at a time. The target will throttle
3457 this number down if necessary. */
3459 buf
= xmalloc (buf_alloc
);
3463 n
= target_fileio_pread (fd
, &buf
[buf_pos
],
3464 buf_alloc
- buf_pos
- padding
, buf_pos
,
3468 /* An error occurred. */
3469 do_cleanups (close_cleanup
);
3475 /* Read all there was. */
3476 do_cleanups (close_cleanup
);
3486 /* If the buffer is filling up, expand it. */
3487 if (buf_alloc
< buf_pos
* 2)
3490 buf
= xrealloc (buf
, buf_alloc
);
3497 /* Read target file FILENAME. Store the result in *BUF_P and return
3498 the size of the transferred data. See the declaration in "target.h"
3499 function for more information about the return value. */
3502 target_fileio_read_alloc (const char *filename
, gdb_byte
**buf_p
)
3504 return target_fileio_read_alloc_1 (filename
, buf_p
, 0);
3507 /* Read target file FILENAME. The result is NUL-terminated and
3508 returned as a string, allocated using xmalloc. If an error occurs
3509 or the transfer is unsupported, NULL is returned. Empty objects
3510 are returned as allocated but empty strings. A warning is issued
3511 if the result contains any embedded NUL bytes. */
3514 target_fileio_read_stralloc (const char *filename
)
3518 LONGEST i
, transferred
;
3520 transferred
= target_fileio_read_alloc_1 (filename
, &buffer
, 1);
3521 bufstr
= (char *) buffer
;
3523 if (transferred
< 0)
3526 if (transferred
== 0)
3527 return xstrdup ("");
3529 bufstr
[transferred
] = 0;
3531 /* Check for embedded NUL bytes; but allow trailing NULs. */
3532 for (i
= strlen (bufstr
); i
< transferred
; i
++)
3535 warning (_("target file %s "
3536 "contained unexpected null characters"),
3546 default_region_ok_for_hw_watchpoint (struct target_ops
*self
,
3547 CORE_ADDR addr
, int len
)
3549 return (len
<= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT
);
3553 default_watchpoint_addr_within_range (struct target_ops
*target
,
3555 CORE_ADDR start
, int length
)
3557 return addr
>= start
&& addr
< start
+ length
;
3560 static struct gdbarch
*
3561 default_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
3563 return target_gdbarch ();
3573 return_minus_one (void)
3585 * Find the next target down the stack from the specified target.
3589 find_target_beneath (struct target_ops
*t
)
3597 find_target_at (enum strata stratum
)
3599 struct target_ops
*t
;
3601 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3602 if (t
->to_stratum
== stratum
)
3609 /* The inferior process has died. Long live the inferior! */
3612 generic_mourn_inferior (void)
3616 ptid
= inferior_ptid
;
3617 inferior_ptid
= null_ptid
;
3619 /* Mark breakpoints uninserted in case something tries to delete a
3620 breakpoint while we delete the inferior's threads (which would
3621 fail, since the inferior is long gone). */
3622 mark_breakpoints_out ();
3624 if (!ptid_equal (ptid
, null_ptid
))
3626 int pid
= ptid_get_pid (ptid
);
3627 exit_inferior (pid
);
3630 /* Note this wipes step-resume breakpoints, so needs to be done
3631 after exit_inferior, which ends up referencing the step-resume
3632 breakpoints through clear_thread_inferior_resources. */
3633 breakpoint_init_inferior (inf_exited
);
3635 registers_changed ();
3637 reopen_exec_file ();
3638 reinit_frame_cache ();
3640 if (deprecated_detach_hook
)
3641 deprecated_detach_hook ();
3644 /* Convert a normal process ID to a string. Returns the string in a
3648 normal_pid_to_str (ptid_t ptid
)
3650 static char buf
[32];
3652 xsnprintf (buf
, sizeof buf
, "process %d", ptid_get_pid (ptid
));
3657 dummy_pid_to_str (struct target_ops
*ops
, ptid_t ptid
)
3659 return normal_pid_to_str (ptid
);
3662 /* Error-catcher for target_find_memory_regions. */
3664 dummy_find_memory_regions (struct target_ops
*self
,
3665 find_memory_region_ftype ignore1
, void *ignore2
)
3667 error (_("Command not implemented for this target."));
3671 /* Error-catcher for target_make_corefile_notes. */
3673 dummy_make_corefile_notes (struct target_ops
*self
,
3674 bfd
*ignore1
, int *ignore2
)
3676 error (_("Command not implemented for this target."));
3680 /* Error-catcher for target_get_bookmark. */
3682 dummy_get_bookmark (struct target_ops
*self
, char *ignore1
, int ignore2
)
3688 /* Error-catcher for target_goto_bookmark. */
3690 dummy_goto_bookmark (struct target_ops
*self
, gdb_byte
*ignore
, int from_tty
)
3695 /* Set up the handful of non-empty slots needed by the dummy target
3699 init_dummy_target (void)
3701 dummy_target
.to_shortname
= "None";
3702 dummy_target
.to_longname
= "None";
3703 dummy_target
.to_doc
= "";
3704 dummy_target
.to_create_inferior
= find_default_create_inferior
;
3705 dummy_target
.to_supports_non_stop
= find_default_supports_non_stop
;
3706 dummy_target
.to_supports_disable_randomization
3707 = find_default_supports_disable_randomization
;
3708 dummy_target
.to_pid_to_str
= dummy_pid_to_str
;
3709 dummy_target
.to_stratum
= dummy_stratum
;
3710 dummy_target
.to_make_corefile_notes
= dummy_make_corefile_notes
;
3711 dummy_target
.to_get_bookmark
= dummy_get_bookmark
;
3712 dummy_target
.to_goto_bookmark
= dummy_goto_bookmark
;
3713 dummy_target
.to_has_all_memory
= (int (*) (struct target_ops
*)) return_zero
;
3714 dummy_target
.to_has_memory
= (int (*) (struct target_ops
*)) return_zero
;
3715 dummy_target
.to_has_stack
= (int (*) (struct target_ops
*)) return_zero
;
3716 dummy_target
.to_has_registers
= (int (*) (struct target_ops
*)) return_zero
;
3717 dummy_target
.to_has_execution
3718 = (int (*) (struct target_ops
*, ptid_t
)) return_zero
;
3719 dummy_target
.to_magic
= OPS_MAGIC
;
3721 install_dummy_methods (&dummy_target
);
3725 debug_to_open (char *args
, int from_tty
)
3727 debug_target
.to_open (args
, from_tty
);
3729 fprintf_unfiltered (gdb_stdlog
, "target_open (%s, %d)\n", args
, from_tty
);
3733 target_close (struct target_ops
*targ
)
3735 gdb_assert (!target_is_pushed (targ
));
3737 if (targ
->to_xclose
!= NULL
)
3738 targ
->to_xclose (targ
);
3739 else if (targ
->to_close
!= NULL
)
3740 targ
->to_close (targ
);
3743 fprintf_unfiltered (gdb_stdlog
, "target_close ()\n");
3747 target_attach (char *args
, int from_tty
)
3749 current_target
.to_attach (¤t_target
, args
, from_tty
);
3751 fprintf_unfiltered (gdb_stdlog
, "target_attach (%s, %d)\n",
3756 target_thread_alive (ptid_t ptid
)
3758 struct target_ops
*t
;
3760 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3762 if (t
->to_thread_alive
!= NULL
)
3766 retval
= t
->to_thread_alive (t
, ptid
);
3768 fprintf_unfiltered (gdb_stdlog
, "target_thread_alive (%d) = %d\n",
3769 ptid_get_pid (ptid
), retval
);
3779 target_find_new_threads (void)
3781 struct target_ops
*t
;
3783 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3785 if (t
->to_find_new_threads
!= NULL
)
3787 t
->to_find_new_threads (t
);
3789 fprintf_unfiltered (gdb_stdlog
, "target_find_new_threads ()\n");
3797 target_stop (ptid_t ptid
)
3801 warning (_("May not interrupt or stop the target, ignoring attempt"));
3805 (*current_target
.to_stop
) (¤t_target
, ptid
);
3809 debug_to_post_attach (struct target_ops
*self
, int pid
)
3811 debug_target
.to_post_attach (&debug_target
, pid
);
3813 fprintf_unfiltered (gdb_stdlog
, "target_post_attach (%d)\n", pid
);
3816 /* Concatenate ELEM to LIST, a comma separate list, and return the
3817 result. The LIST incoming argument is released. */
3820 str_comma_list_concat_elem (char *list
, const char *elem
)
3823 return xstrdup (elem
);
3825 return reconcat (list
, list
, ", ", elem
, (char *) NULL
);
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  if ((*target_options & opt) != 0)
    {
      ret = str_comma_list_concat_elem (ret, opt_str);
      *target_options &= ~opt;
    }

  return ret;
}
3847 target_options_to_string (int target_options
)
3851 #define DO_TARG_OPTION(OPT) \
3852 ret = do_option (&target_options, ret, OPT, #OPT)
3854 DO_TARG_OPTION (TARGET_WNOHANG
);
3856 if (target_options
!= 0)
3857 ret
= str_comma_list_concat_elem (ret
, "unknown???");
3865 debug_print_register (const char * func
,
3866 struct regcache
*regcache
, int regno
)
3868 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
3870 fprintf_unfiltered (gdb_stdlog
, "%s ", func
);
3871 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
)
3872 && gdbarch_register_name (gdbarch
, regno
) != NULL
3873 && gdbarch_register_name (gdbarch
, regno
)[0] != '\0')
3874 fprintf_unfiltered (gdb_stdlog
, "(%s)",
3875 gdbarch_register_name (gdbarch
, regno
));
3877 fprintf_unfiltered (gdb_stdlog
, "(%d)", regno
);
3878 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
))
3880 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
3881 int i
, size
= register_size (gdbarch
, regno
);
3882 gdb_byte buf
[MAX_REGISTER_SIZE
];
3884 regcache_raw_collect (regcache
, regno
, buf
);
3885 fprintf_unfiltered (gdb_stdlog
, " = ");
3886 for (i
= 0; i
< size
; i
++)
3888 fprintf_unfiltered (gdb_stdlog
, "%02x", buf
[i
]);
3890 if (size
<= sizeof (LONGEST
))
3892 ULONGEST val
= extract_unsigned_integer (buf
, size
, byte_order
);
3894 fprintf_unfiltered (gdb_stdlog
, " %s %s",
3895 core_addr_to_string_nz (val
), plongest (val
));
3898 fprintf_unfiltered (gdb_stdlog
, "\n");
3902 target_fetch_registers (struct regcache
*regcache
, int regno
)
3904 struct target_ops
*t
;
3906 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3908 if (t
->to_fetch_registers
!= NULL
)
3910 t
->to_fetch_registers (t
, regcache
, regno
);
3912 debug_print_register ("target_fetch_registers", regcache
, regno
);
3919 target_store_registers (struct regcache
*regcache
, int regno
)
3921 struct target_ops
*t
;
3923 if (!may_write_registers
)
3924 error (_("Writing to registers is not allowed (regno %d)"), regno
);
3926 current_target
.to_store_registers (¤t_target
, regcache
, regno
);
3929 debug_print_register ("target_store_registers", regcache
, regno
);
3934 target_core_of_thread (ptid_t ptid
)
3936 struct target_ops
*t
;
3938 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3940 if (t
->to_core_of_thread
!= NULL
)
3942 int retval
= t
->to_core_of_thread (t
, ptid
);
3945 fprintf_unfiltered (gdb_stdlog
,
3946 "target_core_of_thread (%d) = %d\n",
3947 ptid_get_pid (ptid
), retval
);
3956 target_verify_memory (const gdb_byte
*data
, CORE_ADDR memaddr
, ULONGEST size
)
3958 struct target_ops
*t
;
3960 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3962 if (t
->to_verify_memory
!= NULL
)
3964 int retval
= t
->to_verify_memory (t
, data
, memaddr
, size
);
3967 fprintf_unfiltered (gdb_stdlog
,
3968 "target_verify_memory (%s, %s) = %d\n",
3969 paddress (target_gdbarch (), memaddr
),
3979 /* The documentation for this function is in its prototype declaration in
3983 target_insert_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
3985 struct target_ops
*t
;
3987 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3988 if (t
->to_insert_mask_watchpoint
!= NULL
)
3992 ret
= t
->to_insert_mask_watchpoint (t
, addr
, mask
, rw
);
3995 fprintf_unfiltered (gdb_stdlog
, "\
3996 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
3997 core_addr_to_string (addr
),
3998 core_addr_to_string (mask
), rw
, ret
);
4006 /* The documentation for this function is in its prototype declaration in
4010 target_remove_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
4012 struct target_ops
*t
;
4014 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4015 if (t
->to_remove_mask_watchpoint
!= NULL
)
4019 ret
= t
->to_remove_mask_watchpoint (t
, addr
, mask
, rw
);
4022 fprintf_unfiltered (gdb_stdlog
, "\
4023 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4024 core_addr_to_string (addr
),
4025 core_addr_to_string (mask
), rw
, ret
);
4033 /* The documentation for this function is in its prototype declaration
4037 target_masked_watch_num_registers (CORE_ADDR addr
, CORE_ADDR mask
)
4039 struct target_ops
*t
;
4041 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4042 if (t
->to_masked_watch_num_registers
!= NULL
)
4043 return t
->to_masked_watch_num_registers (t
, addr
, mask
);
4048 /* The documentation for this function is in its prototype declaration
4052 target_ranged_break_num_registers (void)
4054 struct target_ops
*t
;
4056 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4057 if (t
->to_ranged_break_num_registers
!= NULL
)
4058 return t
->to_ranged_break_num_registers (t
);
4065 struct btrace_target_info
*
4066 target_enable_btrace (ptid_t ptid
)
4068 struct target_ops
*t
;
4070 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4071 if (t
->to_enable_btrace
!= NULL
)
4072 return t
->to_enable_btrace (t
, ptid
);
4081 target_disable_btrace (struct btrace_target_info
*btinfo
)
4083 struct target_ops
*t
;
4085 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4086 if (t
->to_disable_btrace
!= NULL
)
4088 t
->to_disable_btrace (t
, btinfo
);
4098 target_teardown_btrace (struct btrace_target_info
*btinfo
)
4100 struct target_ops
*t
;
4102 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4103 if (t
->to_teardown_btrace
!= NULL
)
4105 t
->to_teardown_btrace (t
, btinfo
);
4115 target_read_btrace (VEC (btrace_block_s
) **btrace
,
4116 struct btrace_target_info
*btinfo
,
4117 enum btrace_read_type type
)
4119 struct target_ops
*t
;
4121 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4122 if (t
->to_read_btrace
!= NULL
)
4123 return t
->to_read_btrace (t
, btrace
, btinfo
, type
);
4126 return BTRACE_ERR_NOT_SUPPORTED
;
4132 target_stop_recording (void)
4134 struct target_ops
*t
;
4136 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4137 if (t
->to_stop_recording
!= NULL
)
4139 t
->to_stop_recording (t
);
4143 /* This is optional. */
4149 target_info_record (void)
4151 struct target_ops
*t
;
4153 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4154 if (t
->to_info_record
!= NULL
)
4156 t
->to_info_record (t
);
4166 target_save_record (const char *filename
)
4168 struct target_ops
*t
;
4170 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4171 if (t
->to_save_record
!= NULL
)
4173 t
->to_save_record (t
, filename
);
4183 target_supports_delete_record (void)
4185 struct target_ops
*t
;
4187 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4188 if (t
->to_delete_record
!= NULL
)
4197 target_delete_record (void)
4199 struct target_ops
*t
;
4201 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4202 if (t
->to_delete_record
!= NULL
)
4204 t
->to_delete_record (t
);
4214 target_record_is_replaying (void)
4216 struct target_ops
*t
;
4218 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4219 if (t
->to_record_is_replaying
!= NULL
)
4220 return t
->to_record_is_replaying (t
);
4228 target_goto_record_begin (void)
4230 struct target_ops
*t
;
4232 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4233 if (t
->to_goto_record_begin
!= NULL
)
4235 t
->to_goto_record_begin (t
);
4245 target_goto_record_end (void)
4247 struct target_ops
*t
;
4249 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4250 if (t
->to_goto_record_end
!= NULL
)
4252 t
->to_goto_record_end (t
);
4262 target_goto_record (ULONGEST insn
)
4264 struct target_ops
*t
;
4266 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4267 if (t
->to_goto_record
!= NULL
)
4269 t
->to_goto_record (t
, insn
);
4279 target_insn_history (int size
, int flags
)
4281 struct target_ops
*t
;
4283 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4284 if (t
->to_insn_history
!= NULL
)
4286 t
->to_insn_history (t
, size
, flags
);
4296 target_insn_history_from (ULONGEST from
, int size
, int flags
)
4298 struct target_ops
*t
;
4300 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4301 if (t
->to_insn_history_from
!= NULL
)
4303 t
->to_insn_history_from (t
, from
, size
, flags
);
4313 target_insn_history_range (ULONGEST begin
, ULONGEST end
, int flags
)
4315 struct target_ops
*t
;
4317 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4318 if (t
->to_insn_history_range
!= NULL
)
4320 t
->to_insn_history_range (t
, begin
, end
, flags
);
4330 target_call_history (int size
, int flags
)
4332 struct target_ops
*t
;
4334 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4335 if (t
->to_call_history
!= NULL
)
4337 t
->to_call_history (t
, size
, flags
);
4347 target_call_history_from (ULONGEST begin
, int size
, int flags
)
4349 struct target_ops
*t
;
4351 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4352 if (t
->to_call_history_from
!= NULL
)
4354 t
->to_call_history_from (t
, begin
, size
, flags
);
4364 target_call_history_range (ULONGEST begin
, ULONGEST end
, int flags
)
4366 struct target_ops
*t
;
4368 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4369 if (t
->to_call_history_range
!= NULL
)
4371 t
->to_call_history_range (t
, begin
, end
, flags
);
4379 debug_to_prepare_to_store (struct target_ops
*self
, struct regcache
*regcache
)
4381 debug_target
.to_prepare_to_store (&debug_target
, regcache
);
4383 fprintf_unfiltered (gdb_stdlog
, "target_prepare_to_store ()\n");
4388 const struct frame_unwind
*
4389 target_get_unwinder (void)
4391 struct target_ops
*t
;
4393 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4394 if (t
->to_get_unwinder
!= NULL
)
4395 return t
->to_get_unwinder
;
4402 const struct frame_unwind
*
4403 target_get_tailcall_unwinder (void)
4405 struct target_ops
*t
;
4407 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4408 if (t
->to_get_tailcall_unwinder
!= NULL
)
4409 return t
->to_get_tailcall_unwinder
;
4417 forward_target_decr_pc_after_break (struct target_ops
*ops
,
4418 struct gdbarch
*gdbarch
)
4420 for (; ops
!= NULL
; ops
= ops
->beneath
)
4421 if (ops
->to_decr_pc_after_break
!= NULL
)
4422 return ops
->to_decr_pc_after_break (ops
, gdbarch
);
4424 return gdbarch_decr_pc_after_break (gdbarch
);
4430 target_decr_pc_after_break (struct gdbarch
*gdbarch
)
4432 return forward_target_decr_pc_after_break (current_target
.beneath
, gdbarch
);
4436 deprecated_debug_xfer_memory (CORE_ADDR memaddr
, bfd_byte
*myaddr
, int len
,
4437 int write
, struct mem_attrib
*attrib
,
4438 struct target_ops
*target
)
4442 retval
= debug_target
.deprecated_xfer_memory (memaddr
, myaddr
, len
, write
,
4445 fprintf_unfiltered (gdb_stdlog
,
4446 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4447 paddress (target_gdbarch (), memaddr
), len
,
4448 write
? "write" : "read", retval
);
4454 fputs_unfiltered (", bytes =", gdb_stdlog
);
4455 for (i
= 0; i
< retval
; i
++)
4457 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
4459 if (targetdebug
< 2 && i
> 0)
4461 fprintf_unfiltered (gdb_stdlog
, " ...");
4464 fprintf_unfiltered (gdb_stdlog
, "\n");
4467 fprintf_unfiltered (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
4471 fputc_unfiltered ('\n', gdb_stdlog
);
4477 debug_to_files_info (struct target_ops
*target
)
4479 debug_target
.to_files_info (target
);
4481 fprintf_unfiltered (gdb_stdlog
, "target_files_info (xxx)\n");
4485 debug_to_insert_breakpoint (struct target_ops
*ops
, struct gdbarch
*gdbarch
,
4486 struct bp_target_info
*bp_tgt
)
4490 retval
= debug_target
.to_insert_breakpoint (&debug_target
, gdbarch
, bp_tgt
);
4492 fprintf_unfiltered (gdb_stdlog
,
4493 "target_insert_breakpoint (%s, xxx) = %ld\n",
4494 core_addr_to_string (bp_tgt
->placed_address
),
4495 (unsigned long) retval
);
4500 debug_to_remove_breakpoint (struct target_ops
*ops
, struct gdbarch
*gdbarch
,
4501 struct bp_target_info
*bp_tgt
)
4505 retval
= debug_target
.to_remove_breakpoint (&debug_target
, gdbarch
, bp_tgt
);
4507 fprintf_unfiltered (gdb_stdlog
,
4508 "target_remove_breakpoint (%s, xxx) = %ld\n",
4509 core_addr_to_string (bp_tgt
->placed_address
),
4510 (unsigned long) retval
);
4515 debug_to_can_use_hw_breakpoint (struct target_ops
*self
,
4516 int type
, int cnt
, int from_tty
)
4520 retval
= debug_target
.to_can_use_hw_breakpoint (&debug_target
,
4521 type
, cnt
, from_tty
);
4523 fprintf_unfiltered (gdb_stdlog
,
4524 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4525 (unsigned long) type
,
4526 (unsigned long) cnt
,
4527 (unsigned long) from_tty
,
4528 (unsigned long) retval
);
4533 debug_to_region_ok_for_hw_watchpoint (struct target_ops
*self
,
4534 CORE_ADDR addr
, int len
)
4538 retval
= debug_target
.to_region_ok_for_hw_watchpoint (&debug_target
,
4541 fprintf_unfiltered (gdb_stdlog
,
4542 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4543 core_addr_to_string (addr
), (unsigned long) len
,
4544 core_addr_to_string (retval
));
4549 debug_to_can_accel_watchpoint_condition (struct target_ops
*self
,
4550 CORE_ADDR addr
, int len
, int rw
,
4551 struct expression
*cond
)
4555 retval
= debug_target
.to_can_accel_watchpoint_condition (&debug_target
,
4559 fprintf_unfiltered (gdb_stdlog
,
4560 "target_can_accel_watchpoint_condition "
4561 "(%s, %d, %d, %s) = %ld\n",
4562 core_addr_to_string (addr
), len
, rw
,
4563 host_address_to_string (cond
), (unsigned long) retval
);
4568 debug_to_stopped_by_watchpoint (struct target_ops
*ops
)
4572 retval
= debug_target
.to_stopped_by_watchpoint (&debug_target
);
4574 fprintf_unfiltered (gdb_stdlog
,
4575 "target_stopped_by_watchpoint () = %ld\n",
4576 (unsigned long) retval
);
4581 debug_to_stopped_data_address (struct target_ops
*target
, CORE_ADDR
*addr
)
4585 retval
= debug_target
.to_stopped_data_address (target
, addr
);
4587 fprintf_unfiltered (gdb_stdlog
,
4588 "target_stopped_data_address ([%s]) = %ld\n",
4589 core_addr_to_string (*addr
),
4590 (unsigned long)retval
);
4595 debug_to_watchpoint_addr_within_range (struct target_ops
*target
,
4597 CORE_ADDR start
, int length
)
4601 retval
= debug_target
.to_watchpoint_addr_within_range (target
, addr
,
4604 fprintf_filtered (gdb_stdlog
,
4605 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4606 core_addr_to_string (addr
), core_addr_to_string (start
),
4612 debug_to_insert_hw_breakpoint (struct target_ops
*self
,
4613 struct gdbarch
*gdbarch
,
4614 struct bp_target_info
*bp_tgt
)
4618 retval
= debug_target
.to_insert_hw_breakpoint (&debug_target
,
4621 fprintf_unfiltered (gdb_stdlog
,
4622 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4623 core_addr_to_string (bp_tgt
->placed_address
),
4624 (unsigned long) retval
);
4629 debug_to_remove_hw_breakpoint (struct target_ops
*self
,
4630 struct gdbarch
*gdbarch
,
4631 struct bp_target_info
*bp_tgt
)
4635 retval
= debug_target
.to_remove_hw_breakpoint (&debug_target
,
4638 fprintf_unfiltered (gdb_stdlog
,
4639 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4640 core_addr_to_string (bp_tgt
->placed_address
),
4641 (unsigned long) retval
);
4646 debug_to_insert_watchpoint (struct target_ops
*self
,
4647 CORE_ADDR addr
, int len
, int type
,
4648 struct expression
*cond
)
4652 retval
= debug_target
.to_insert_watchpoint (&debug_target
,
4653 addr
, len
, type
, cond
);
4655 fprintf_unfiltered (gdb_stdlog
,
4656 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4657 core_addr_to_string (addr
), len
, type
,
4658 host_address_to_string (cond
), (unsigned long) retval
);
4663 debug_to_remove_watchpoint (struct target_ops
*self
,
4664 CORE_ADDR addr
, int len
, int type
,
4665 struct expression
*cond
)
4669 retval
= debug_target
.to_remove_watchpoint (&debug_target
,
4670 addr
, len
, type
, cond
);
4672 fprintf_unfiltered (gdb_stdlog
,
4673 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4674 core_addr_to_string (addr
), len
, type
,
4675 host_address_to_string (cond
), (unsigned long) retval
);
4680 debug_to_terminal_init (struct target_ops
*self
)
4682 debug_target
.to_terminal_init (&debug_target
);
4684 fprintf_unfiltered (gdb_stdlog
, "target_terminal_init ()\n");
4688 debug_to_terminal_inferior (struct target_ops
*self
)
4690 debug_target
.to_terminal_inferior (&debug_target
);
4692 fprintf_unfiltered (gdb_stdlog
, "target_terminal_inferior ()\n");
4696 debug_to_terminal_ours_for_output (struct target_ops
*self
)
4698 debug_target
.to_terminal_ours_for_output (&debug_target
);
4700 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours_for_output ()\n");
4704 debug_to_terminal_ours (struct target_ops
*self
)
4706 debug_target
.to_terminal_ours (&debug_target
);
4708 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours ()\n");
4712 debug_to_terminal_save_ours (struct target_ops
*self
)
4714 debug_target
.to_terminal_save_ours (&debug_target
);
4716 fprintf_unfiltered (gdb_stdlog
, "target_terminal_save_ours ()\n");
4720 debug_to_terminal_info (struct target_ops
*self
,
4721 const char *arg
, int from_tty
)
4723 debug_target
.to_terminal_info (&debug_target
, arg
, from_tty
);
4725 fprintf_unfiltered (gdb_stdlog
, "target_terminal_info (%s, %d)\n", arg
,
4730 debug_to_load (struct target_ops
*self
, char *args
, int from_tty
)
4732 debug_target
.to_load (&debug_target
, args
, from_tty
);
4734 fprintf_unfiltered (gdb_stdlog
, "target_load (%s, %d)\n", args
, from_tty
);
4738 debug_to_post_startup_inferior (struct target_ops
*self
, ptid_t ptid
)
4740 debug_target
.to_post_startup_inferior (&debug_target
, ptid
);
4742 fprintf_unfiltered (gdb_stdlog
, "target_post_startup_inferior (%d)\n",
4743 ptid_get_pid (ptid
));
4747 debug_to_insert_fork_catchpoint (struct target_ops
*self
, int pid
)
4751 retval
= debug_target
.to_insert_fork_catchpoint (&debug_target
, pid
);
4753 fprintf_unfiltered (gdb_stdlog
, "target_insert_fork_catchpoint (%d) = %d\n",
4760 debug_to_remove_fork_catchpoint (struct target_ops
*self
, int pid
)
4764 retval
= debug_target
.to_remove_fork_catchpoint (&debug_target
, pid
);
4766 fprintf_unfiltered (gdb_stdlog
, "target_remove_fork_catchpoint (%d) = %d\n",
4773 debug_to_insert_vfork_catchpoint (struct target_ops
*self
, int pid
)
4777 retval
= debug_target
.to_insert_vfork_catchpoint (&debug_target
, pid
);
4779 fprintf_unfiltered (gdb_stdlog
, "target_insert_vfork_catchpoint (%d) = %d\n",
4786 debug_to_remove_vfork_catchpoint (struct target_ops
*self
, int pid
)
4790 retval
= debug_target
.to_remove_vfork_catchpoint (&debug_target
, pid
);
4792 fprintf_unfiltered (gdb_stdlog
, "target_remove_vfork_catchpoint (%d) = %d\n",
4799 debug_to_insert_exec_catchpoint (struct target_ops
*self
, int pid
)
4803 retval
= debug_target
.to_insert_exec_catchpoint (&debug_target
, pid
);
4805 fprintf_unfiltered (gdb_stdlog
, "target_insert_exec_catchpoint (%d) = %d\n",
4812 debug_to_remove_exec_catchpoint (struct target_ops
*self
, int pid
)
4816 retval
= debug_target
.to_remove_exec_catchpoint (&debug_target
, pid
);
4818 fprintf_unfiltered (gdb_stdlog
, "target_remove_exec_catchpoint (%d) = %d\n",
4825 debug_to_has_exited (struct target_ops
*self
,
4826 int pid
, int wait_status
, int *exit_status
)
4830 has_exited
= debug_target
.to_has_exited (&debug_target
,
4831 pid
, wait_status
, exit_status
);
4833 fprintf_unfiltered (gdb_stdlog
, "target_has_exited (%d, %d, %d) = %d\n",
4834 pid
, wait_status
, *exit_status
, has_exited
);
4840 debug_to_can_run (struct target_ops
*self
)
4844 retval
= debug_target
.to_can_run (&debug_target
);
4846 fprintf_unfiltered (gdb_stdlog
, "target_can_run () = %d\n", retval
);
4851 static struct gdbarch
*
4852 debug_to_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
4854 struct gdbarch
*retval
;
4856 retval
= debug_target
.to_thread_architecture (ops
, ptid
);
4858 fprintf_unfiltered (gdb_stdlog
,
4859 "target_thread_architecture (%s) = %s [%s]\n",
4860 target_pid_to_str (ptid
),
4861 host_address_to_string (retval
),
4862 gdbarch_bfd_arch_info (retval
)->printable_name
);
4867 debug_to_stop (struct target_ops
*self
, ptid_t ptid
)
4869 debug_target
.to_stop (&debug_target
, ptid
);
4871 fprintf_unfiltered (gdb_stdlog
, "target_stop (%s)\n",
4872 target_pid_to_str (ptid
));
4876 debug_to_rcmd (struct target_ops
*self
, char *command
,
4877 struct ui_file
*outbuf
)
4879 debug_target
.to_rcmd (&debug_target
, command
, outbuf
);
4880 fprintf_unfiltered (gdb_stdlog
, "target_rcmd (%s, ...)\n", command
);
4884 debug_to_pid_to_exec_file (struct target_ops
*self
, int pid
)
4888 exec_file
= debug_target
.to_pid_to_exec_file (&debug_target
, pid
);
4890 fprintf_unfiltered (gdb_stdlog
, "target_pid_to_exec_file (%d) = %s\n",
4897 setup_target_debug (void)
4899 memcpy (&debug_target
, ¤t_target
, sizeof debug_target
);
4901 current_target
.to_open
= debug_to_open
;
4902 current_target
.to_post_attach
= debug_to_post_attach
;
4903 current_target
.to_prepare_to_store
= debug_to_prepare_to_store
;
4904 current_target
.deprecated_xfer_memory
= deprecated_debug_xfer_memory
;
4905 current_target
.to_files_info
= debug_to_files_info
;
4906 current_target
.to_insert_breakpoint
= debug_to_insert_breakpoint
;
4907 current_target
.to_remove_breakpoint
= debug_to_remove_breakpoint
;
4908 current_target
.to_can_use_hw_breakpoint
= debug_to_can_use_hw_breakpoint
;
4909 current_target
.to_insert_hw_breakpoint
= debug_to_insert_hw_breakpoint
;
4910 current_target
.to_remove_hw_breakpoint
= debug_to_remove_hw_breakpoint
;
4911 current_target
.to_insert_watchpoint
= debug_to_insert_watchpoint
;
4912 current_target
.to_remove_watchpoint
= debug_to_remove_watchpoint
;
4913 current_target
.to_stopped_by_watchpoint
= debug_to_stopped_by_watchpoint
;
4914 current_target
.to_stopped_data_address
= debug_to_stopped_data_address
;
4915 current_target
.to_watchpoint_addr_within_range
4916 = debug_to_watchpoint_addr_within_range
;
4917 current_target
.to_region_ok_for_hw_watchpoint
4918 = debug_to_region_ok_for_hw_watchpoint
;
4919 current_target
.to_can_accel_watchpoint_condition
4920 = debug_to_can_accel_watchpoint_condition
;
4921 current_target
.to_terminal_init
= debug_to_terminal_init
;
4922 current_target
.to_terminal_inferior
= debug_to_terminal_inferior
;
4923 current_target
.to_terminal_ours_for_output
4924 = debug_to_terminal_ours_for_output
;
4925 current_target
.to_terminal_ours
= debug_to_terminal_ours
;
4926 current_target
.to_terminal_save_ours
= debug_to_terminal_save_ours
;
4927 current_target
.to_terminal_info
= debug_to_terminal_info
;
4928 current_target
.to_load
= debug_to_load
;
4929 current_target
.to_post_startup_inferior
= debug_to_post_startup_inferior
;
4930 current_target
.to_insert_fork_catchpoint
= debug_to_insert_fork_catchpoint
;
4931 current_target
.to_remove_fork_catchpoint
= debug_to_remove_fork_catchpoint
;
4932 current_target
.to_insert_vfork_catchpoint
= debug_to_insert_vfork_catchpoint
;
4933 current_target
.to_remove_vfork_catchpoint
= debug_to_remove_vfork_catchpoint
;
4934 current_target
.to_insert_exec_catchpoint
= debug_to_insert_exec_catchpoint
;
4935 current_target
.to_remove_exec_catchpoint
= debug_to_remove_exec_catchpoint
;
4936 current_target
.to_has_exited
= debug_to_has_exited
;
4937 current_target
.to_can_run
= debug_to_can_run
;
4938 current_target
.to_stop
= debug_to_stop
;
4939 current_target
.to_rcmd
= debug_to_rcmd
;
4940 current_target
.to_pid_to_exec_file
= debug_to_pid_to_exec_file
;
4941 current_target
.to_thread_architecture
= debug_to_thread_architecture
;
4945 static char targ_desc
[] =
4946 "Names of targets and files being debugged.\nShows the entire \
4947 stack of targets currently in use (including the exec-file,\n\
4948 core-file, and process, if any), as well as the symbol file name.";
4951 default_rcmd (struct target_ops
*self
, char *command
, struct ui_file
*output
)
4953 error (_("\"monitor\" command not supported by this target."));
4957 do_monitor_command (char *cmd
,
4960 target_rcmd (cmd
, gdb_stdtarg
);
4963 /* Print the name of each layers of our target stack. */
4966 maintenance_print_target_stack (char *cmd
, int from_tty
)
4968 struct target_ops
*t
;
4970 printf_filtered (_("The current target stack is:\n"));
4972 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
4974 printf_filtered (" - %s (%s)\n", t
->to_shortname
, t
->to_longname
);
4978 /* Controls if async mode is permitted. */
4979 int target_async_permitted
= 0;
4981 /* The set command writes to this variable. If the inferior is
4982 executing, target_async_permitted is *not* updated. */
4983 static int target_async_permitted_1
= 0;
4986 set_target_async_command (char *args
, int from_tty
,
4987 struct cmd_list_element
*c
)
4989 if (have_live_inferiors ())
4991 target_async_permitted_1
= target_async_permitted
;
4992 error (_("Cannot change this setting while the inferior is running."));
4995 target_async_permitted
= target_async_permitted_1
;
4999 show_target_async_command (struct ui_file
*file
, int from_tty
,
5000 struct cmd_list_element
*c
,
5003 fprintf_filtered (file
,
5004 _("Controlling the inferior in "
5005 "asynchronous mode is %s.\n"), value
);
5008 /* Temporary copies of permission settings. */
5010 static int may_write_registers_1
= 1;
5011 static int may_write_memory_1
= 1;
5012 static int may_insert_breakpoints_1
= 1;
5013 static int may_insert_tracepoints_1
= 1;
5014 static int may_insert_fast_tracepoints_1
= 1;
5015 static int may_stop_1
= 1;
5017 /* Make the user-set values match the real values again. */
5020 update_target_permissions (void)
5022 may_write_registers_1
= may_write_registers
;
5023 may_write_memory_1
= may_write_memory
;
5024 may_insert_breakpoints_1
= may_insert_breakpoints
;
5025 may_insert_tracepoints_1
= may_insert_tracepoints
;
5026 may_insert_fast_tracepoints_1
= may_insert_fast_tracepoints
;
5027 may_stop_1
= may_stop
;
5030 /* The one function handles (most of) the permission flags in the same
5034 set_target_permissions (char *args
, int from_tty
,
5035 struct cmd_list_element
*c
)
5037 if (target_has_execution
)
5039 update_target_permissions ();
5040 error (_("Cannot change this setting while the inferior is running."));
5043 /* Make the real values match the user-changed values. */
5044 may_write_registers
= may_write_registers_1
;
5045 may_insert_breakpoints
= may_insert_breakpoints_1
;
5046 may_insert_tracepoints
= may_insert_tracepoints_1
;
5047 may_insert_fast_tracepoints
= may_insert_fast_tracepoints_1
;
5048 may_stop
= may_stop_1
;
5049 update_observer_mode ();
5052 /* Set memory write permission independently of observer mode. */
5055 set_write_memory_permission (char *args
, int from_tty
,
5056 struct cmd_list_element
*c
)
5058 /* Make the real values match the user-changed values. */
5059 may_write_memory
= may_write_memory_1
;
5060 update_observer_mode ();
5065 initialize_targets (void)
5067 init_dummy_target ();
5068 push_target (&dummy_target
);
5070 add_info ("target", target_info
, targ_desc
);
5071 add_info ("files", target_info
, targ_desc
);
5073 add_setshow_zuinteger_cmd ("target", class_maintenance
, &targetdebug
, _("\
5074 Set target debugging."), _("\
5075 Show target debugging."), _("\
5076 When non-zero, target debugging is enabled. Higher numbers are more\n\
5077 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
5081 &setdebuglist
, &showdebuglist
);
5083 add_setshow_boolean_cmd ("trust-readonly-sections", class_support
,
5084 &trust_readonly
, _("\
5085 Set mode for reading from readonly sections."), _("\
5086 Show mode for reading from readonly sections."), _("\
5087 When this mode is on, memory reads from readonly sections (such as .text)\n\
5088 will be read from the object file instead of from the target. This will\n\
5089 result in significant performance improvement for remote targets."),
5091 show_trust_readonly
,
5092 &setlist
, &showlist
);
5094 add_com ("monitor", class_obscure
, do_monitor_command
,
5095 _("Send a command to the remote monitor (remote targets only)."));
5097 add_cmd ("target-stack", class_maintenance
, maintenance_print_target_stack
,
5098 _("Print the name of each layer of the internal target stack."),
5099 &maintenanceprintlist
);
5101 add_setshow_boolean_cmd ("target-async", no_class
,
5102 &target_async_permitted_1
, _("\
5103 Set whether gdb controls the inferior in asynchronous mode."), _("\
5104 Show whether gdb controls the inferior in asynchronous mode."), _("\
5105 Tells gdb whether to control the inferior in asynchronous mode."),
5106 set_target_async_command
,
5107 show_target_async_command
,
5111 add_setshow_boolean_cmd ("may-write-registers", class_support
,
5112 &may_write_registers_1
, _("\
5113 Set permission to write into registers."), _("\
5114 Show permission to write into registers."), _("\
5115 When this permission is on, GDB may write into the target's registers.\n\
5116 Otherwise, any sort of write attempt will result in an error."),
5117 set_target_permissions
, NULL
,
5118 &setlist
, &showlist
);
5120 add_setshow_boolean_cmd ("may-write-memory", class_support
,
5121 &may_write_memory_1
, _("\
5122 Set permission to write into target memory."), _("\
5123 Show permission to write into target memory."), _("\
5124 When this permission is on, GDB may write into the target's memory.\n\
5125 Otherwise, any sort of write attempt will result in an error."),
5126 set_write_memory_permission
, NULL
,
5127 &setlist
, &showlist
);
5129 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support
,
5130 &may_insert_breakpoints_1
, _("\
5131 Set permission to insert breakpoints in the target."), _("\
5132 Show permission to insert breakpoints in the target."), _("\
5133 When this permission is on, GDB may insert breakpoints in the program.\n\
5134 Otherwise, any sort of insertion attempt will result in an error."),
5135 set_target_permissions
, NULL
,
5136 &setlist
, &showlist
);
5138 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support
,
5139 &may_insert_tracepoints_1
, _("\
5140 Set permission to insert tracepoints in the target."), _("\
5141 Show permission to insert tracepoints in the target."), _("\
5142 When this permission is on, GDB may insert tracepoints in the program.\n\
5143 Otherwise, any sort of insertion attempt will result in an error."),
5144 set_target_permissions
, NULL
,
5145 &setlist
, &showlist
);
5147 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support
,
5148 &may_insert_fast_tracepoints_1
, _("\
5149 Set permission to insert fast tracepoints in the target."), _("\
5150 Show permission to insert fast tracepoints in the target."), _("\
5151 When this permission is on, GDB may insert fast tracepoints.\n\
5152 Otherwise, any sort of insertion attempt will result in an error."),
5153 set_target_permissions
, NULL
,
5154 &setlist
, &showlist
);
5156 add_setshow_boolean_cmd ("may-interrupt", class_support
,
5158 Set permission to interrupt or signal the target."), _("\
5159 Show permission to interrupt or signal the target."), _("\
5160 When this permission is on, GDB may interrupt/stop the target's execution.\n\
5161 Otherwise, any attempt to interrupt or stop will be ignored."),
5162 set_target_permissions
, NULL
,
5163 &setlist
, &showlist
);