]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/target.c
remove some calls to INHERIT and de_fault
[thirdparty/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47 #include "auxv.h"
48
49 static void target_info (char *, int);
50
51 static void default_terminal_info (struct target_ops *, const char *, int);
52
53 static int default_watchpoint_addr_within_range (struct target_ops *,
54 CORE_ADDR, CORE_ADDR, int);
55
56 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
57 CORE_ADDR, int);
58
59 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
60
61 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
62 long lwp, long tid);
63
64 static int default_follow_fork (struct target_ops *self, int follow_child,
65 int detach_fork);
66
67 static void default_mourn_inferior (struct target_ops *self);
68
69 static int default_search_memory (struct target_ops *ops,
70 CORE_ADDR start_addr,
71 ULONGEST search_space_len,
72 const gdb_byte *pattern,
73 ULONGEST pattern_len,
74 CORE_ADDR *found_addrp);
75
76 static void tcomplain (void) ATTRIBUTE_NORETURN;
77
78 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
79
80 static int return_zero (struct target_ops *);
81
82 static int return_zero_has_execution (struct target_ops *, ptid_t);
83
84 void target_ignore (void);
85
86 static void target_command (char *, int);
87
88 static struct target_ops *find_default_run_target (char *);
89
90 static target_xfer_partial_ftype default_xfer_partial;
91
92 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
93 ptid_t ptid);
94
95 static int dummy_find_memory_regions (struct target_ops *self,
96 find_memory_region_ftype ignore1,
97 void *ignore2);
98
99 static char *dummy_make_corefile_notes (struct target_ops *self,
100 bfd *ignore1, int *ignore2);
101
102 static char *default_pid_to_str (struct target_ops *ops, ptid_t ptid);
103
104 static int find_default_can_async_p (struct target_ops *ignore);
105
106 static int find_default_is_async_p (struct target_ops *ignore);
107
108 static enum exec_direction_kind default_execution_direction
109 (struct target_ops *self);
110
111 #include "target-delegates.c"
112
113 static void init_dummy_target (void);
114
115 static struct target_ops debug_target;
116
117 static void debug_to_open (char *, int);
118
119 static void debug_to_prepare_to_store (struct target_ops *self,
120 struct regcache *);
121
122 static void debug_to_files_info (struct target_ops *);
123
124 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
125 struct bp_target_info *);
126
127 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
128 struct bp_target_info *);
129
130 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
131 int, int, int);
132
133 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
134 struct gdbarch *,
135 struct bp_target_info *);
136
137 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
138 struct gdbarch *,
139 struct bp_target_info *);
140
141 static int debug_to_insert_watchpoint (struct target_ops *self,
142 CORE_ADDR, int, int,
143 struct expression *);
144
145 static int debug_to_remove_watchpoint (struct target_ops *self,
146 CORE_ADDR, int, int,
147 struct expression *);
148
149 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
150
151 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
152 CORE_ADDR, CORE_ADDR, int);
153
154 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
155 CORE_ADDR, int);
156
157 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
158 CORE_ADDR, int, int,
159 struct expression *);
160
161 static void debug_to_terminal_init (struct target_ops *self);
162
163 static void debug_to_terminal_inferior (struct target_ops *self);
164
165 static void debug_to_terminal_ours_for_output (struct target_ops *self);
166
167 static void debug_to_terminal_save_ours (struct target_ops *self);
168
169 static void debug_to_terminal_ours (struct target_ops *self);
170
171 static void debug_to_load (struct target_ops *self, char *, int);
172
173 static int debug_to_can_run (struct target_ops *self);
174
175 static void debug_to_stop (struct target_ops *self, ptid_t);
176
177 /* Pointer to array of target architecture structures; the size of the
178 array; the current index into the array; the allocated size of the
179 array. */
180 struct target_ops **target_structs;
181 unsigned target_struct_size;
182 unsigned target_struct_allocsize;
183 #define DEFAULT_ALLOCSIZE 10
184
185 /* The initial current target, so that there is always a semi-valid
186 current target. */
187
188 static struct target_ops dummy_target;
189
190 /* Top of target stack. */
191
192 static struct target_ops *target_stack;
193
194 /* The target structure we are currently using to talk to a process
195 or file or whatever "inferior" we have. */
196
197 struct target_ops current_target;
198
199 /* Command list for target. */
200
201 static struct cmd_list_element *targetlist = NULL;
202
203 /* Nonzero if we should trust readonly sections from the
204 executable when reading memory. */
205
206 static int trust_readonly = 0;
207
208 /* Nonzero if we should show true memory content including
209 memory breakpoint inserted by gdb. */
210
211 static int show_memory_breakpoints = 0;
212
213 /* These globals control whether GDB attempts to perform these
214 operations; they are useful for targets that need to prevent
215 inadvertant disruption, such as in non-stop mode. */
216
217 int may_write_registers = 1;
218
219 int may_write_memory = 1;
220
221 int may_insert_breakpoints = 1;
222
223 int may_insert_tracepoints = 1;
224
225 int may_insert_fast_tracepoints = 1;
226
227 int may_stop = 1;
228
229 /* Non-zero if we want to see trace of target level stuff. */
230
231 static unsigned int targetdebug = 0;
232 static void
233 show_targetdebug (struct ui_file *file, int from_tty,
234 struct cmd_list_element *c, const char *value)
235 {
236 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
237 }
238
239 static void setup_target_debug (void);
240
241 /* The user just typed 'target' without the name of a target. */
242
/* Handler for a bare "target" command: there is no default target, so
   just tell the user an argument is required.  */
static void
target_command (char *arg, int from_tty)
{
  fputs_filtered ("Argument required (target name).  Try `help target'\n",
		  gdb_stdout);
}
249
250 /* Default target_has_* methods for process_stratum targets. */
251
252 int
253 default_child_has_all_memory (struct target_ops *ops)
254 {
255 /* If no inferior selected, then we can't read memory here. */
256 if (ptid_equal (inferior_ptid, null_ptid))
257 return 0;
258
259 return 1;
260 }
261
262 int
263 default_child_has_memory (struct target_ops *ops)
264 {
265 /* If no inferior selected, then we can't read memory here. */
266 if (ptid_equal (inferior_ptid, null_ptid))
267 return 0;
268
269 return 1;
270 }
271
272 int
273 default_child_has_stack (struct target_ops *ops)
274 {
275 /* If no inferior selected, there's no stack. */
276 if (ptid_equal (inferior_ptid, null_ptid))
277 return 0;
278
279 return 1;
280 }
281
282 int
283 default_child_has_registers (struct target_ops *ops)
284 {
285 /* Can't read registers from no inferior. */
286 if (ptid_equal (inferior_ptid, null_ptid))
287 return 0;
288
289 return 1;
290 }
291
292 int
293 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
294 {
295 /* If there's no thread selected, then we can't make it run through
296 hoops. */
297 if (ptid_equal (the_ptid, null_ptid))
298 return 0;
299
300 return 1;
301 }
302
303
304 int
305 target_has_all_memory_1 (void)
306 {
307 struct target_ops *t;
308
309 for (t = current_target.beneath; t != NULL; t = t->beneath)
310 if (t->to_has_all_memory (t))
311 return 1;
312
313 return 0;
314 }
315
316 int
317 target_has_memory_1 (void)
318 {
319 struct target_ops *t;
320
321 for (t = current_target.beneath; t != NULL; t = t->beneath)
322 if (t->to_has_memory (t))
323 return 1;
324
325 return 0;
326 }
327
328 int
329 target_has_stack_1 (void)
330 {
331 struct target_ops *t;
332
333 for (t = current_target.beneath; t != NULL; t = t->beneath)
334 if (t->to_has_stack (t))
335 return 1;
336
337 return 0;
338 }
339
340 int
341 target_has_registers_1 (void)
342 {
343 struct target_ops *t;
344
345 for (t = current_target.beneath; t != NULL; t = t->beneath)
346 if (t->to_has_registers (t))
347 return 1;
348
349 return 0;
350 }
351
352 int
353 target_has_execution_1 (ptid_t the_ptid)
354 {
355 struct target_ops *t;
356
357 for (t = current_target.beneath; t != NULL; t = t->beneath)
358 if (t->to_has_execution (t, the_ptid))
359 return 1;
360
361 return 0;
362 }
363
/* Like target_has_execution_1, but for the currently selected thread
   (inferior_ptid).  */
int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
369
370 /* Complete initialization of T. This ensures that various fields in
371 T are set, if needed by the target implementation. */
372
void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  These are the
     methods that other code calls unconditionally, so they may never be
     left NULL.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = return_zero_has_execution;

  /* Fill any remaining NULL methods with delegators generated by
     make-target-delegates (see target-delegates.c).  */
  install_delegators (t);
}
397
398 /* Add possible target architecture T to the list and add a new
399 command 'target T->to_shortname'. Set COMPLETER as the command's
400 completer if not NULL. */
401
void
add_target_with_completer (struct target_ops *t,
			   completer_ftype *completer)
{
  struct cmd_list_element *c;

  /* Make sure T's mandatory methods are filled in before it is ever
     used.  */
  complete_target_initialization (t);

  /* Grow-on-demand array of registered targets; doubled when full.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* Create the "target" prefix command lazily, on first registration.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol.  For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
	       &targetlist);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
438
439 /* Add a possible target architecture to the list. */
440
/* Add a possible target architecture to the list, with no custom
   command-line completer.  */
void
add_target (struct target_ops *t)
{
  add_target_with_completer (t, NULL);
}
446
447 /* See target.h. */
448
void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  alt = xstrprintf ("target %s", t->to_shortname);
  /* NOTE(review): ALT is not freed here; it looks like deprecate_cmd
     retains the replacement string for later display -- confirm before
     changing ownership.  */
  deprecate_cmd (c, alt);
}
461
462 /* Stub functions */
463
/* Deliberate no-op, usable as a target method that should do nothing.  */
void
target_ignore (void)
{
}
468
/* Kill the inferior process via the current target stack.  */
void
target_kill (void)
{
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");

  current_target.to_kill (&current_target);
}
477
/* Load a program into the inferior.  The data cache is invalidated
   first since loading rewrites target memory behind its back.  */
void
target_load (char *arg, int from_tty)
{
  target_dcache_invalidate ();
  (*current_target.to_load) (&current_target, arg, from_tty);
}
484
485 void
486 target_create_inferior (char *exec_file, char *args,
487 char **env, int from_tty)
488 {
489 struct target_ops *t;
490
491 for (t = current_target.beneath; t != NULL; t = t->beneath)
492 {
493 if (t->to_create_inferior != NULL)
494 {
495 t->to_create_inferior (t, exec_file, args, env, from_tty);
496 if (targetdebug)
497 fprintf_unfiltered (gdb_stdlog,
498 "target_create_inferior (%s, %s, xxx, %d)\n",
499 exec_file, args, from_tty);
500 return;
501 }
502 }
503
504 internal_error (__FILE__, __LINE__,
505 _("could not find a target to create inferior"));
506 }
507
/* Give the inferior ownership of the terminal, unless GDB should
   retain it for a background resume.  */
void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet.  However, if sync_execution
     is not set, we know it will become async prior to resume.  */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */
  (*current_target.to_terminal_inferior) (&current_target);
}
522
/* Default memory-transfer method for targets with no accessible
   memory: fail every request with EIO.  */
static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
530
/* Report that the current target cannot perform the requested
   operation.  Does not return (error throws).  */
static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
537
/* Report that the requested operation needs a live process.  Does not
   return (error throws).  */
void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
543
/* Default to_terminal_info method: there is nothing to report.  */
static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
549
550 /* A default implementation for the to_get_ada_task_ptid target method.
551
552 This function builds the PTID by using both LWP and TID as part of
553 the PTID lwp and tid elements. The pid used is the pid of the
554 inferior_ptid. */
555
/* Default to_get_ada_task_ptid method: build a ptid from the current
   inferior's pid plus the given LWP and TID.  */
static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
561
562 static enum exec_direction_kind
563 default_execution_direction (struct target_ops *self)
564 {
565 if (!target_can_execute_reverse)
566 return EXEC_FORWARD;
567 else if (!target_can_async_p ())
568 return EXEC_FORWARD;
569 else
570 gdb_assert_not_reached ("\
571 to_execution_direction must be implemented for reverse async");
572 }
573
574 /* Go through the target stack from top to bottom, copying over zero
575 entries in current_target, then filling in still empty entries. In
576 effect, we are doing class inheritance through the pushed target
577 vectors.
578
579 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
580 is currently implemented, is that it discards any knowledge of
581 which target an inherited method originally belonged to.
582 Consequently, new new target methods should instead explicitly and
583 locally search the target stack for the target that can handle the
584 request. */
585
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  current_target.to_stratum = target_stack->to_stratum;

  /* INHERIT copies a field from target T into current_target, but only
     if current_target does not already have a value for it -- i.e. the
     topmost target providing the field wins.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

      /* Do not add any new INHERITs here.  Instead, use the delegation
	 mechanism provided by make-target-delegates.  */
      for (t = target_stack; t; t = t->beneath)
	{
	  INHERIT (to_shortname, t);
	  INHERIT (to_longname, t);
	  INHERIT (to_attach_no_wait, t);
	  INHERIT (deprecated_xfer_memory, t);
	  INHERIT (to_have_steppable_watchpoint, t);
	  INHERIT (to_have_continuable_watchpoint, t);
	  INHERIT (to_has_thread_control, t);
	}
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Do not add any new de_faults here.  Instead, use the
     delegation mechanism provided by make-target-delegates.  */

#define de_fault(field, value) \
    if (!current_target.field)               \
      current_target.field = value

    de_fault (deprecated_xfer_memory,
	      (int (*) (CORE_ADDR, gdb_byte *, int, int,
			struct mem_attrib *, struct target_ops *))
	      nomemory);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
640
641 /* Push a new target type into the stack of the existing target accessors,
642 possibly superseding some of the existing accessors.
643
644 Rather than allow an empty stack, we always have the dummy target at
645 the bottom stratum, so we can call the function vectors without
646 checking them. */
647
void
push_target (struct target_ops *t)
{
  /* CUR walks the chain as a pointer-to-link so the list can be
     spliced in place.  */
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  The stack is
     kept sorted by stratum, highest first.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
         and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  /* Rebuild the squashed current_target to reflect the new stack.  */
  update_current_target ();
}
691
692 /* Remove a target_ops vector from the stack, wherever it may be.
693 Return how many times it was removed (0 or 1). */
694
int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target always stays at the bottom of the stack.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}
733
/* Unpush every target whose stratum is strictly above ABOVE_STRATUM,
   starting from the top of the stack.  */
void
pop_all_targets_above (enum strata above_stratum)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      if (!unpush_target (target_stack))
	{
	  /* unpush_target failing here means the stack and
	     current_target disagree -- an internal inconsistency.  */
	  fprintf_unfiltered (gdb_stderr,
			      "pop_all_targets couldn't find target %s\n",
			      target_stack->to_shortname);
	  internal_error (__FILE__, __LINE__,
			  _("failed internal consistency check"));
	  break;
	}
    }
}
750
/* Unpush every target except the bottom dummy target.  */
void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
756
757 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
758
759 int
760 target_is_pushed (struct target_ops *t)
761 {
762 struct target_ops **cur;
763
764 /* Check magic number. If wrong, it probably means someone changed
765 the struct definition, but not all the places that initialize one. */
766 if (t->to_magic != OPS_MAGIC)
767 {
768 fprintf_unfiltered (gdb_stderr,
769 "Magic number of %s target struct wrong\n",
770 t->to_shortname);
771 internal_error (__FILE__, __LINE__,
772 _("failed internal consistency check"));
773 }
774
775 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
776 if (*cur == t)
777 return 1;
778
779 return 0;
780 }
781
782 /* Using the objfile specified in OBJFILE, find the address for the
783 current thread's thread-local storage with offset OFFSET. */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* Volatile: must survive the TRY_CATCH setjmp/longjmp below.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the topmost target that can resolve thread-local addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
	                                                   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
         throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
		         " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
		         " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
		         " thread-local variables in\n"
		         "the shared library `%s'\n"
		         "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
		         " thread-local variables in\n"
		         "the executable `%s'\n"
		         "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
		         "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
		         "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      /* Not a TLS-specific error; re-throw for a higher catcher.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
878
879 const char *
880 target_xfer_status_to_string (enum target_xfer_status err)
881 {
882 #define CASE(X) case X: return #X
883 switch (err)
884 {
885 CASE(TARGET_XFER_E_IO);
886 CASE(TARGET_XFER_E_UNAVAILABLE);
887 default:
888 return "<unknown>";
889 }
890 #undef CASE
891 };
892
893
894 #undef MIN
895 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
896
897 /* target_read_string -- read a null terminated string, up to LEN bytes,
898 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
899 Set *STRING to a pointer to malloc'd memory containing the data; the caller
900 is responsible for freeing it. Return the number of bytes successfully
901 read. */
902
int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];		/* One aligned 4-byte chunk of target memory.  */
  int errcode = 0;
  char *buffer;			/* Accumulated result; ownership passes to caller.  */
  int buffer_allocated;
  char *bufptr;			/* Next free position in BUFFER.  */
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read an aligned 4-byte word containing MEMADDR; TLEN is how
	 many of its bytes (starting at OFFSET) belong to the string.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow BUFFER (doubling) if the next chunk would not fit.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes into the result, stopping at the terminating NUL.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
 done:
  /* BUFFER is returned even on error, so the caller always frees it.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
969
/* Return TARGET's table of memory-mapped sections, via its
   to_get_section_table method.  */
struct target_section_table *
target_get_section_table (struct target_ops *target)
{
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");

  return (*target->to_get_section_table) (target);
}
978
979 /* Find a section containing ADDR. */
980
981 struct target_section *
982 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
983 {
984 struct target_section_table *table = target_get_section_table (target);
985 struct target_section *secp;
986
987 if (table == NULL)
988 return NULL;
989
990 for (secp = table->sections; secp < table->sections_end; secp++)
991 {
992 if (addr >= secp->addr && addr < secp->endaddr)
993 return secp;
994 }
995 return NULL;
996 }
997
998 /* Read memory from the live target, even if currently inspecting a
999 traceframe. The return is the same as that of target_read. */
1000
static enum target_xfer_status
target_read_live_memory (enum target_object object,
			 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
			 ULONGEST *xfered_len)
{
  enum target_xfer_status ret;
  struct cleanup *cleanup;

  /* Switch momentarily out of tfind mode so to access live memory.
     Note that this must not clear global state, such as the frame
     cache, which must still remain valid for the previous traceframe.
     We may be _building_ the frame cache at this point.  */
  cleanup = make_cleanup_restore_traceframe_number ();
  set_traceframe_number (-1);

  /* Start from beneath current_target so the read goes through the
     whole real stack.  */
  ret = target_xfer_partial (current_target.beneath, object, NULL,
			     myaddr, NULL, memaddr, len, xfered_len);

  /* Restore the previously selected traceframe.  */
  do_cleanups (cleanup);
  return ret;
}
1022
1023 /* Using the set of read-only target sections of OPS, read live
1024 read-only memory. Note that the actual reads start from the
1025 top-most target again.
1026
1027 For interface/parameters/return description see target.h,
1028 to_xfer_partial. */
1029
static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only read-only sections qualify for the live-memory fallback.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  /* No qualifying section: report end-of-file for this transfer.  */
  return TARGET_XFER_EOF;
}
1078
1079 /* Read memory from more than one valid target. A core file, for
1080 instance, could have some of memory but delegate other bits to
1081 the target below it. So, we must manually try all targets. */
1082
1083 static enum target_xfer_status
1084 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1085 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1086 ULONGEST *xfered_len)
1087 {
1088 enum target_xfer_status res;
1089
1090 do
1091 {
1092 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1093 readbuf, writebuf, memaddr, len,
1094 xfered_len);
1095 if (res == TARGET_XFER_OK)
1096 break;
1097
1098 /* Stop if the target reports that the memory is not available. */
1099 if (res == TARGET_XFER_E_UNAVAILABLE)
1100 break;
1101
1102 /* We want to continue past core files to executables, but not
1103 past a running target's memory. */
1104 if (ops->to_has_all_memory (ops))
1105 break;
1106
1107 ops = ops->beneath;
1108 }
1109 while (ops != NULL);
1110
1111 return res;
1112 }
1113
1114 /* Perform a partial memory transfer.
1115 For docs see target.h, to_xfer_partial. */
1116
static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  /* Try, in order: unmapped overlay sections, trusted read-only
     sections, traceframe-aware fallback to live memory, GDB's dcache,
     and finally a raw walk down the target stack.  The first method
     that can satisfy (part of) the request wins.  */
  enum target_xfer_status res;
  /* NOTE(review): reg_len is an int while LEN is ULONGEST — this
     presumably assumes callers cap requests well below INT_MAX;
     confirm against callers.  */
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  /* Translate the unmapped address to its load address and
	     read the bytes from the section's file contents.  */
	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.
     Reading straight from the file avoids a round trip to the
     (possibly remote) target.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* The start of the request is NOT in the traceframe's
		 available memory; read the unavailable prefix from
		 live read-only sections instead.  */

	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  /* Truncate the request at the first available
		     byte.  */
		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  First clip the request so it does
     not cross out of the attribute region containing MEMADDR.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode before touching anything.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1324
1325 /* Perform a partial memory transfer. For docs see target.h,
1326 to_xfer_partial. */
1327
1328 static enum target_xfer_status
1329 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1330 gdb_byte *readbuf, const gdb_byte *writebuf,
1331 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1332 {
1333 enum target_xfer_status res;
1334
1335 /* Zero length requests are ok and require no work. */
1336 if (len == 0)
1337 return TARGET_XFER_EOF;
1338
1339 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1340 breakpoint insns, thus hiding out from higher layers whether
1341 there are software breakpoints inserted in the code stream. */
1342 if (readbuf != NULL)
1343 {
1344 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1345 xfered_len);
1346
1347 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1348 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1349 }
1350 else
1351 {
1352 void *buf;
1353 struct cleanup *old_chain;
1354
1355 /* A large write request is likely to be partially satisfied
1356 by memory_xfer_partial_1. We will continually malloc
1357 and free a copy of the entire write request for breakpoint
1358 shadow handling even though we only end up writing a small
1359 subset of it. Cap writes to 4KB to mitigate this. */
1360 len = min (4096, len);
1361
1362 buf = xmalloc (len);
1363 old_chain = make_cleanup (xfree, buf);
1364 memcpy (buf, writebuf, len);
1365
1366 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1367 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1368 xfered_len);
1369
1370 do_cleanups (old_chain);
1371 }
1372
1373 return res;
1374 }
1375
1376 static void
1377 restore_show_memory_breakpoints (void *arg)
1378 {
1379 show_memory_breakpoints = (uintptr_t) arg;
1380 }
1381
1382 struct cleanup *
1383 make_show_memory_breakpoints_cleanup (int show)
1384 {
1385 int current = show_memory_breakpoints;
1386
1387 show_memory_breakpoints = show;
1388 return make_cleanup (restore_show_memory_breakpoints,
1389 (void *) (uintptr_t) current);
1390 }
1391
1392 /* For docs see target.h, to_xfer_partial. */
1393
enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  /* Top-level dispatcher: route memory objects to the memory-specific
     machinery, raw memory straight down the stack, and everything
     else to the target's own to_xfer_partial.  */
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honor the "may-write-memory" user setting before touching the
     target.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  /* With "set debug target" on, log the request, its result, and a
     hex dump of the transferred bytes.  */
  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  /* NOTE(review): I is an int compared against the ULONGEST
	     *XFERED_LEN — presumably fine because partial transfers
	     are small, but confirm.  */
	  int i;

	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      /* Break the dump into 16-byte lines; with low debug
		 verbosity, elide everything after the first line.  */
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1484
1485 /* Read LEN bytes of target memory at address MEMADDR, placing the
1486 results in GDB's memory at MYADDR. Returns either 0 for success or
1487 TARGET_XFER_E_IO if any error occurs.
1488
1489 If an error occurs, no guarantee is made about the contents of the data at
1490 MYADDR. In particular, the caller should not depend upon partial reads
1491 filling the buffer with good data. There is no way for the caller to know
1492 how much good data might have been transfered anyway. Callers that can
1493 deal with partial reads should call target_read (which will retry until
1494 it makes no progress, and then return how much was transferred). */
1495
1496 int
1497 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1498 {
1499 /* Dispatch to the topmost target, not the flattened current_target.
1500 Memory accesses check target->to_has_(all_)memory, and the
1501 flattened target doesn't inherit those. */
1502 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1503 myaddr, memaddr, len) == len)
1504 return 0;
1505 else
1506 return TARGET_XFER_E_IO;
1507 }
1508
1509 /* Like target_read_memory, but specify explicitly that this is a read
1510 from the target's raw memory. That is, this read bypasses the
1511 dcache, breakpoint shadowing, etc. */
1512
1513 int
1514 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1515 {
1516 /* See comment in target_read_memory about why the request starts at
1517 current_target.beneath. */
1518 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1519 myaddr, memaddr, len) == len)
1520 return 0;
1521 else
1522 return TARGET_XFER_E_IO;
1523 }
1524
1525 /* Like target_read_memory, but specify explicitly that this is a read from
1526 the target's stack. This may trigger different cache behavior. */
1527
1528 int
1529 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1530 {
1531 /* See comment in target_read_memory about why the request starts at
1532 current_target.beneath. */
1533 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1534 myaddr, memaddr, len) == len)
1535 return 0;
1536 else
1537 return TARGET_XFER_E_IO;
1538 }
1539
1540 /* Like target_read_memory, but specify explicitly that this is a read from
1541 the target's code. This may trigger different cache behavior. */
1542
1543 int
1544 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1545 {
1546 /* See comment in target_read_memory about why the request starts at
1547 current_target.beneath. */
1548 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1549 myaddr, memaddr, len) == len)
1550 return 0;
1551 else
1552 return TARGET_XFER_E_IO;
1553 }
1554
1555 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1556 Returns either 0 for success or TARGET_XFER_E_IO if any
1557 error occurs. If an error occurs, no guarantee is made about how
1558 much data got written. Callers that can deal with partial writes
1559 should call target_write. */
1560
1561 int
1562 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1563 {
1564 /* See comment in target_read_memory about why the request starts at
1565 current_target.beneath. */
1566 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1567 myaddr, memaddr, len) == len)
1568 return 0;
1569 else
1570 return TARGET_XFER_E_IO;
1571 }
1572
1573 /* Write LEN bytes from MYADDR to target raw memory at address
1574 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1575 if any error occurs. If an error occurs, no guarantee is made
1576 about how much data got written. Callers that can deal with
1577 partial writes should call target_write. */
1578
1579 int
1580 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1581 {
1582 /* See comment in target_read_memory about why the request starts at
1583 current_target.beneath. */
1584 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1585 myaddr, memaddr, len) == len)
1586 return 0;
1587 else
1588 return TARGET_XFER_E_IO;
1589 }
1590
1591 /* Fetch the target's memory map. */
1592
1593 VEC(mem_region_s) *
1594 target_memory_map (void)
1595 {
1596 VEC(mem_region_s) *result;
1597 struct mem_region *last_one, *this_one;
1598 int ix;
1599 struct target_ops *t;
1600
1601 if (targetdebug)
1602 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1603
1604 result = current_target.to_memory_map (&current_target);
1605 if (result == NULL)
1606 return NULL;
1607
1608 qsort (VEC_address (mem_region_s, result),
1609 VEC_length (mem_region_s, result),
1610 sizeof (struct mem_region), mem_region_cmp);
1611
1612 /* Check that regions do not overlap. Simultaneously assign
1613 a numbering for the "mem" commands to use to refer to
1614 each region. */
1615 last_one = NULL;
1616 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1617 {
1618 this_one->number = ix;
1619
1620 if (last_one && last_one->hi > this_one->lo)
1621 {
1622 warning (_("Overlapping regions in memory map: ignoring"));
1623 VEC_free (mem_region_s, result);
1624 return NULL;
1625 }
1626 last_one = this_one;
1627 }
1628
1629 return result;
1630 }
1631
1632 void
1633 target_flash_erase (ULONGEST address, LONGEST length)
1634 {
1635 if (targetdebug)
1636 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1637 hex_string (address), phex (length, 0));
1638 current_target.to_flash_erase (&current_target, address, length);
1639 }
1640
1641 void
1642 target_flash_done (void)
1643 {
1644 if (targetdebug)
1645 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1646 current_target.to_flash_done (&current_target);
1647 }
1648
static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  /* "show trust-readonly-sections" callback: report the current
     setting to FILE.  */
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1657
1658 /* More generic transfers. */
1659
1660 static enum target_xfer_status
1661 default_xfer_partial (struct target_ops *ops, enum target_object object,
1662 const char *annex, gdb_byte *readbuf,
1663 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1664 ULONGEST *xfered_len)
1665 {
1666 if (object == TARGET_OBJECT_MEMORY
1667 && ops->deprecated_xfer_memory != NULL)
1668 /* If available, fall back to the target's
1669 "deprecated_xfer_memory" method. */
1670 {
1671 int xfered = -1;
1672
1673 errno = 0;
1674 if (writebuf != NULL)
1675 {
1676 void *buffer = xmalloc (len);
1677 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1678
1679 memcpy (buffer, writebuf, len);
1680 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1681 1/*write*/, NULL, ops);
1682 do_cleanups (cleanup);
1683 }
1684 if (readbuf != NULL)
1685 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1686 0/*read*/, NULL, ops);
1687 if (xfered > 0)
1688 {
1689 *xfered_len = (ULONGEST) xfered;
1690 return TARGET_XFER_E_IO;
1691 }
1692 else if (xfered == 0 && errno == 0)
1693 /* "deprecated_xfer_memory" uses 0, cross checked against
1694 ERRNO as one indication of an error. */
1695 return TARGET_XFER_EOF;
1696 else
1697 return TARGET_XFER_E_IO;
1698 }
1699 else
1700 {
1701 gdb_assert (ops->beneath != NULL);
1702 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1703 readbuf, writebuf, offset, len,
1704 xfered_len);
1705 }
1706 }
1707
1708 /* Target vector read/write partial wrapper functions. */
1709
1710 static enum target_xfer_status
1711 target_read_partial (struct target_ops *ops,
1712 enum target_object object,
1713 const char *annex, gdb_byte *buf,
1714 ULONGEST offset, ULONGEST len,
1715 ULONGEST *xfered_len)
1716 {
1717 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1718 xfered_len);
1719 }
1720
1721 static enum target_xfer_status
1722 target_write_partial (struct target_ops *ops,
1723 enum target_object object,
1724 const char *annex, const gdb_byte *buf,
1725 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1726 {
1727 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1728 xfered_len);
1729 }
1730
1731 /* Wrappers to perform the full transfer. */
1732
1733 /* For docs on target_read see target.h. */
1734
1735 LONGEST
1736 target_read (struct target_ops *ops,
1737 enum target_object object,
1738 const char *annex, gdb_byte *buf,
1739 ULONGEST offset, LONGEST len)
1740 {
1741 LONGEST xfered = 0;
1742
1743 while (xfered < len)
1744 {
1745 ULONGEST xfered_len;
1746 enum target_xfer_status status;
1747
1748 status = target_read_partial (ops, object, annex,
1749 (gdb_byte *) buf + xfered,
1750 offset + xfered, len - xfered,
1751 &xfered_len);
1752
1753 /* Call an observer, notifying them of the xfer progress? */
1754 if (status == TARGET_XFER_EOF)
1755 return xfered;
1756 else if (status == TARGET_XFER_OK)
1757 {
1758 xfered += xfered_len;
1759 QUIT;
1760 }
1761 else
1762 return -1;
1763
1764 }
1765 return len;
1766 }
1767
1768 /* Assuming that the entire [begin, end) range of memory cannot be
1769 read, try to read whatever subrange is possible to read.
1770
1771 The function returns, in RESULT, either zero or one memory block.
1772 If there's a readable subrange at the beginning, it is completely
1773 read and returned. Any further readable subrange will not be read.
1774 Otherwise, if there's a readable subrange at the end, it will be
1775 completely read and returned. Any readable subranges before it
1776 (obviously, not starting at the beginning), will be ignored. In
1777 other cases -- either no readable subrange, or readable subrange(s)
1778 that is neither at the beginning, or end, nothing is returned.
1779
1780 The purpose of this function is to handle a read across a boundary
1781 of accessible memory in a case when memory map is not available.
1782 The above restrictions are fine for this case, but will give
1783 incorrect results if the memory is 'patchy'. However, supporting
1784 'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  An explicit memory map is
1786 recommended for this case -- and target_read_memory_robust will
1787 take care of reading multiple ranges then. */
1788
static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  /* Bisect [BEGIN, END) to find a single readable subrange anchored
     at one boundary; push at most one block onto *RESULT.  See the
     comment above for the restrictions this implies.  */
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      /* The readable subrange (if any) starts at BEGIN; bisect
	 forward.  */
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      /* The readable subrange (if any) ends at END; bisect
	 backward.  */
      forward = 0;
      --current_end;
    }
  else
    {
      /* Neither boundary byte is readable; give up.  */
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* "First half" is always the half adjacent to the known-readable
	 boundary, in the direction we are scanning.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the
	     next iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  BUF is
	 transferred into the result as-is.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy just the
	 readable tail into a right-sized buffer and free BUF.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
1902
1903 void
1904 free_memory_read_result_vector (void *x)
1905 {
1906 VEC(memory_read_result_s) *v = x;
1907 memory_read_result_s *current;
1908 int ix;
1909
1910 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
1911 {
1912 xfree (current->data);
1913 }
1914 VEC_free (memory_read_result_s, v);
1915 }
1916
VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  /* Read [OFFSET, OFFSET+LEN), skipping unreadable regions, and
     return a vector of the readable blocks.  Caller owns the vector
     and each block's data (see free_memory_read_result_vector).  */
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      /* region->hi == 0 means the region has no upper bound.  */
      if (region->hi == 0)
	rlen = len - xfered;
      else
	rlen = region->hi - offset;

      /* NOTE(review): RLEN is computed from OFFSET rather than
	 OFFSET + XFERED, so once XFERED > 0 it presumably overstates
	 how much of the region remains — confirm whether a chunk can
	 straddle a region boundary here.  */

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
	{
	  /* Cannot read this region.  Note that we can end up here only
	     if the region is explicitly marked inaccessible, or
	     'inaccessible-by-default' is in effect.  */
	  xfered += rlen;
	}
      else
	{
	  LONGEST to_read = min (len - xfered, rlen);
	  gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

	  LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
				      (gdb_byte *) buffer,
				      offset + xfered, to_read);
	  /* Call an observer, notifying them of the xfer progress?  */
	  if (xfer <= 0)
	    {
	      /* Got an error reading full chunk.  See if maybe we can read
		 some subrange.  */
	      xfree (buffer);
	      read_whatever_is_readable (ops, offset + xfered,
					 offset + xfered + to_read, &result);
	      xfered += to_read;
	    }
	  else
	    {
	      /* Full or partial success: record the block; BUFFER's
		 ownership moves into RESULT.  */
	      struct memory_read_result r;
	      r.data = buffer;
	      r.begin = offset + xfered;
	      r.end = r.begin + xfer;
	      VEC_safe_push (memory_read_result_s, result, &r);
	      xfered += xfer;
	    }
	  QUIT;
	}
    }
  return result;
}
1975
1976
1977 /* An alternative to target_write with progress callbacks. */
1978
1979 LONGEST
1980 target_write_with_progress (struct target_ops *ops,
1981 enum target_object object,
1982 const char *annex, const gdb_byte *buf,
1983 ULONGEST offset, LONGEST len,
1984 void (*progress) (ULONGEST, void *), void *baton)
1985 {
1986 LONGEST xfered = 0;
1987
1988 /* Give the progress callback a chance to set up. */
1989 if (progress)
1990 (*progress) (0, baton);
1991
1992 while (xfered < len)
1993 {
1994 ULONGEST xfered_len;
1995 enum target_xfer_status status;
1996
1997 status = target_write_partial (ops, object, annex,
1998 (gdb_byte *) buf + xfered,
1999 offset + xfered, len - xfered,
2000 &xfered_len);
2001
2002 if (status == TARGET_XFER_EOF)
2003 return xfered;
2004 if (TARGET_XFER_STATUS_ERROR_P (status))
2005 return -1;
2006
2007 gdb_assert (status == TARGET_XFER_OK);
2008 if (progress)
2009 (*progress) (xfered_len, baton);
2010
2011 xfered += xfered_len;
2012 QUIT;
2013 }
2014 return len;
2015 }
2016
2017 /* For docs on target_write see target.h. */
2018
2019 LONGEST
2020 target_write (struct target_ops *ops,
2021 enum target_object object,
2022 const char *annex, const gdb_byte *buf,
2023 ULONGEST offset, LONGEST len)
2024 {
2025 return target_write_with_progress (ops, object, annex, buf, offset, len,
2026 NULL, NULL);
2027 }
2028
2029 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2030 the size of the transferred data. PADDING additional bytes are
2031 available in *BUF_P. This is a helper function for
2032 target_read_alloc; see the declaration of that function for more
2033 information. */
2034
static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte **buf_p, int padding)
{
  /* Accumulate the object into a geometrically-grown buffer.  On
     success *BUF_P receives the buffer (caller frees) and the byte
     count is returned; on error the buffer is freed here and
     TARGET_XFER_E_IO is returned.  PADDING extra bytes are always
     kept free at the end (used by target_read_stralloc for the
     terminating NUL).  */
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;

  /* This function does not have a length parameter; it reads the
     entire OBJECT.  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      status = target_read_partial (ops, object, annex, &buf[buf_pos],
				    buf_pos, buf_alloc - buf_pos - padding,
				    &xfered_len);

      if (status == TARGET_XFER_EOF)
	{
	  /* Read all there was.  An empty object yields a freed
	     buffer and *BUF_P left untouched.  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}
      else if (status != TARGET_XFER_OK)
	{
	  /* An error occurred.  */
	  xfree (buf);
	  return TARGET_XFER_E_IO;
	}

      buf_pos += xfered_len;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
2091
2092 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2093 the size of the transferred data. See the declaration in "target.h"
2094 function for more information about the return value. */
2095
2096 LONGEST
2097 target_read_alloc (struct target_ops *ops, enum target_object object,
2098 const char *annex, gdb_byte **buf_p)
2099 {
2100 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2101 }
2102
2103 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2104 returned as a string, allocated using xmalloc. If an error occurs
2105 or the transfer is unsupported, NULL is returned. Empty objects
2106 are returned as allocated but empty strings. A warning is issued
2107 if the result contains any embedded NUL bytes. */
2108
2109 char *
2110 target_read_stralloc (struct target_ops *ops, enum target_object object,
2111 const char *annex)
2112 {
2113 gdb_byte *buffer;
2114 char *bufstr;
2115 LONGEST i, transferred;
2116
2117 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2118 bufstr = (char *) buffer;
2119
2120 if (transferred < 0)
2121 return NULL;
2122
2123 if (transferred == 0)
2124 return xstrdup ("");
2125
2126 bufstr[transferred] = 0;
2127
2128 /* Check for embedded NUL bytes; but allow trailing NULs. */
2129 for (i = strlen (bufstr); i < transferred; i++)
2130 if (bufstr[i] != 0)
2131 {
2132 warning (_("target object %d, annex %s, "
2133 "contained unexpected null characters"),
2134 (int) object, annex ? annex : "(none)");
2135 break;
2136 }
2137
2138 return bufstr;
2139 }
2140
2141 /* Memory transfer methods. */
2142
2143 void
2144 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2145 LONGEST len)
2146 {
2147 /* This method is used to read from an alternate, non-current
2148 target. This read must bypass the overlay support (as symbols
2149 don't match this target), and GDB's internal cache (wrong cache
2150 for this target). */
2151 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2152 != len)
2153 memory_error (TARGET_XFER_E_IO, addr);
2154 }
2155
2156 ULONGEST
2157 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2158 int len, enum bfd_endian byte_order)
2159 {
2160 gdb_byte buf[sizeof (ULONGEST)];
2161
2162 gdb_assert (len <= sizeof (buf));
2163 get_target_memory (ops, addr, buf, len);
2164 return extract_unsigned_integer (buf, len, byte_order);
2165 }
2166
2167 /* See target.h. */
2168
2169 int
2170 target_insert_breakpoint (struct gdbarch *gdbarch,
2171 struct bp_target_info *bp_tgt)
2172 {
2173 if (!may_insert_breakpoints)
2174 {
2175 warning (_("May not insert breakpoints"));
2176 return 1;
2177 }
2178
2179 return current_target.to_insert_breakpoint (&current_target,
2180 gdbarch, bp_tgt);
2181 }
2182
2183 /* See target.h. */
2184
2185 int
2186 target_remove_breakpoint (struct gdbarch *gdbarch,
2187 struct bp_target_info *bp_tgt)
2188 {
2189 /* This is kind of a weird case to handle, but the permission might
2190 have been changed after breakpoints were inserted - in which case
2191 we should just take the user literally and assume that any
2192 breakpoints should be left in place. */
2193 if (!may_insert_breakpoints)
2194 {
2195 warning (_("May not remove breakpoints"));
2196 return 1;
2197 }
2198
2199 return current_target.to_remove_breakpoint (&current_target,
2200 gdbarch, bp_tgt);
2201 }
2202
2203 static void
2204 target_info (char *args, int from_tty)
2205 {
2206 struct target_ops *t;
2207 int has_all_mem = 0;
2208
2209 if (symfile_objfile != NULL)
2210 printf_unfiltered (_("Symbols from \"%s\".\n"),
2211 objfile_name (symfile_objfile));
2212
2213 for (t = target_stack; t != NULL; t = t->beneath)
2214 {
2215 if (!(*t->to_has_memory) (t))
2216 continue;
2217
2218 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2219 continue;
2220 if (has_all_mem)
2221 printf_unfiltered (_("\tWhile running this, "
2222 "GDB does not access memory from...\n"));
2223 printf_unfiltered ("%s:\n", t->to_longname);
2224 (t->to_files_info) (t);
2225 has_all_mem = (*t->to_has_all_memory) (t);
2226 }
2227 }
2228
/* This function is called before any new inferior is created, e.g.
   by running a program, attaching, or connecting to a target.
   It cleans up any state from previous invocations which might
   change between runs.  This is a subset of what target_preopen
   resets (things which might change between targets).  */

void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  */
  if (!gdbarch_has_global_solist (target_gdbarch ()))
    {
      /* Forget the previous inferior's shared libraries.  */
      no_shared_libraries (NULL, from_tty);

      /* Drop cached memory-region information; it described the old
	 inferior.  */
      invalidate_target_mem_regions ();

      /* Likewise drop any cached target description.  */
      target_clear_description ();
    }

  /* The in-process agent's capabilities must be re-probed for the new
     inferior.  */
  agent_capability_invalidate ();
}
2269
2270 /* Callback for iterate_over_inferiors. Gets rid of the given
2271 inferior. */
2272
2273 static int
2274 dispose_inferior (struct inferior *inf, void *args)
2275 {
2276 struct thread_info *thread;
2277
2278 thread = any_thread_of_process (inf->pid);
2279 if (thread)
2280 {
2281 switch_to_thread (thread->ptid);
2282
2283 /* Core inferiors actually should be detached, not killed. */
2284 if (target_has_execution)
2285 target_kill ();
2286 else
2287 target_detach (NULL, 0);
2288 }
2289
2290 return 0;
2291 }
2292
2293 /* This is to be called by the open routine before it does
2294 anything. */
2295
2296 void
2297 target_preopen (int from_tty)
2298 {
2299 dont_repeat ();
2300
2301 if (have_inferiors ())
2302 {
2303 if (!from_tty
2304 || !have_live_inferiors ()
2305 || query (_("A program is being debugged already. Kill it? ")))
2306 iterate_over_inferiors (dispose_inferior, NULL);
2307 else
2308 error (_("Program not killed."));
2309 }
2310
2311 /* Calling target_kill may remove the target from the stack. But if
2312 it doesn't (which seems like a win for UDI), remove it now. */
2313 /* Leave the exec target, though. The user may be switching from a
2314 live process to a core of the same program. */
2315 pop_all_targets_above (file_stratum);
2316
2317 target_pre_inferior (from_tty);
2318 }
2319
2320 /* Detach a target after doing deferred register stores. */
2321
2322 void
2323 target_detach (const char *args, int from_tty)
2324 {
2325 struct target_ops* t;
2326
2327 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2328 /* Don't remove global breakpoints here. They're removed on
2329 disconnection from the target. */
2330 ;
2331 else
2332 /* If we're in breakpoints-always-inserted mode, have to remove
2333 them before detaching. */
2334 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2335
2336 prepare_for_detach ();
2337
2338 current_target.to_detach (&current_target, args, from_tty);
2339 if (targetdebug)
2340 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2341 args, from_tty);
2342 }
2343
2344 void
2345 target_disconnect (char *args, int from_tty)
2346 {
2347 /* If we're in breakpoints-always-inserted mode or if breakpoints
2348 are global across processes, we have to remove them before
2349 disconnecting. */
2350 remove_breakpoints ();
2351
2352 if (targetdebug)
2353 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2354 args, from_tty);
2355 current_target.to_disconnect (&current_target, args, from_tty);
2356 }
2357
2358 ptid_t
2359 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2360 {
2361 struct target_ops *t;
2362 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2363 status, options);
2364
2365 if (targetdebug)
2366 {
2367 char *status_string;
2368 char *options_string;
2369
2370 status_string = target_waitstatus_to_string (status);
2371 options_string = target_options_to_string (options);
2372 fprintf_unfiltered (gdb_stdlog,
2373 "target_wait (%d, status, options={%s})"
2374 " = %d, %s\n",
2375 ptid_get_pid (ptid), options_string,
2376 ptid_get_pid (retval), status_string);
2377 xfree (status_string);
2378 xfree (options_string);
2379 }
2380
2381 return retval;
2382 }
2383
2384 char *
2385 target_pid_to_str (ptid_t ptid)
2386 {
2387 return (*current_target.to_pid_to_str) (&current_target, ptid);
2388 }
2389
2390 char *
2391 target_thread_name (struct thread_info *info)
2392 {
2393 return current_target.to_thread_name (&current_target, info);
2394 }
2395
2396 void
2397 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2398 {
2399 struct target_ops *t;
2400
2401 target_dcache_invalidate ();
2402
2403 current_target.to_resume (&current_target, ptid, step, signal);
2404 if (targetdebug)
2405 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2406 ptid_get_pid (ptid),
2407 step ? "step" : "continue",
2408 gdb_signal_to_name (signal));
2409
2410 registers_changed_ptid (ptid);
2411 set_executing (ptid, 1);
2412 set_running (ptid, 1);
2413 clear_inline_frame_state (ptid);
2414 }
2415
2416 void
2417 target_pass_signals (int numsigs, unsigned char *pass_signals)
2418 {
2419 if (targetdebug)
2420 {
2421 int i;
2422
2423 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2424 numsigs);
2425
2426 for (i = 0; i < numsigs; i++)
2427 if (pass_signals[i])
2428 fprintf_unfiltered (gdb_stdlog, " %s",
2429 gdb_signal_to_name (i));
2430
2431 fprintf_unfiltered (gdb_stdlog, " })\n");
2432 }
2433
2434 (*current_target.to_pass_signals) (&current_target, numsigs, pass_signals);
2435 }
2436
2437 void
2438 target_program_signals (int numsigs, unsigned char *program_signals)
2439 {
2440 if (targetdebug)
2441 {
2442 int i;
2443
2444 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2445 numsigs);
2446
2447 for (i = 0; i < numsigs; i++)
2448 if (program_signals[i])
2449 fprintf_unfiltered (gdb_stdlog, " %s",
2450 gdb_signal_to_name (i));
2451
2452 fprintf_unfiltered (gdb_stdlog, " })\n");
2453 }
2454
2455 (*current_target.to_program_signals) (&current_target,
2456 numsigs, program_signals);
2457 }
2458
static int
default_follow_fork (struct target_ops *self, int follow_child,
		     int detach_fork)
{
  /* Reaching this means some target reported a fork event but
     provides no way to follow it -- an internal inconsistency rather
     than a user error.  */
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to follow fork"));
}
2467
2468 /* Look through the list of possible targets for a target that can
2469 follow forks. */
2470
2471 int
2472 target_follow_fork (int follow_child, int detach_fork)
2473 {
2474 int retval = current_target.to_follow_fork (&current_target,
2475 follow_child, detach_fork);
2476
2477 if (targetdebug)
2478 fprintf_unfiltered (gdb_stdlog,
2479 "target_follow_fork (%d, %d) = %d\n",
2480 follow_child, detach_fork, retval);
2481 return retval;
2482 }
2483
/* Default to_mourn_inferior: reached only when no target on the stack
   implements mourning, which is an internal inconsistency.  */

static void
default_mourn_inferior (struct target_ops *self)
{
  /* Fix: the message previously read "to follow mourn inferior", a
     copy-paste from default_follow_fork.  */
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to mourn inferior"));
}
2490
2491 void
2492 target_mourn_inferior (void)
2493 {
2494 current_target.to_mourn_inferior (&current_target);
2495 if (targetdebug)
2496 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2497
2498 /* We no longer need to keep handles on any of the object files.
2499 Make sure to release them to avoid unnecessarily locking any
2500 of them while we're not actually debugging. */
2501 bfd_cache_close_all ();
2502 }
2503
2504 /* Look for a target which can describe architectural features, starting
2505 from TARGET. If we find one, return its description. */
2506
2507 const struct target_desc *
2508 target_read_description (struct target_ops *target)
2509 {
2510 return target->to_read_description (target);
2511 }
2512
/* This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search on
   the target side with, for example, gdbserver).

   Returns 1 and sets *FOUND_ADDRP if PATTERN (PATTERN_LEN bytes) is
   found within [START_ADDR, START_ADDR + SEARCH_SPACE_LEN); returns 0
   if not found, and -1 if a memory read fails.  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* One chunk plus PATTERN_LEN - 1 overlap bytes, so a match that
     straddles two chunks is still found.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  /* Ensure the buffer is released if an error is thrown below.  */
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  /* KEEP_LEN is the overlap carried over from the previous
	     chunk; it equals PATTERN_LEN - 1 (asserted below).  */
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2619
2620 /* Default implementation of memory-searching. */
2621
2622 static int
2623 default_search_memory (struct target_ops *self,
2624 CORE_ADDR start_addr, ULONGEST search_space_len,
2625 const gdb_byte *pattern, ULONGEST pattern_len,
2626 CORE_ADDR *found_addrp)
2627 {
2628 /* Start over from the top of the target stack. */
2629 return simple_search_memory (current_target.beneath,
2630 start_addr, search_space_len,
2631 pattern, pattern_len, found_addrp);
2632 }
2633
2634 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2635 sequence of bytes in PATTERN with length PATTERN_LEN.
2636
2637 The result is 1 if found, 0 if not found, and -1 if there was an error
2638 requiring halting of the search (e.g. memory read error).
2639 If the pattern is found the address is recorded in FOUND_ADDRP. */
2640
2641 int
2642 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2643 const gdb_byte *pattern, ULONGEST pattern_len,
2644 CORE_ADDR *found_addrp)
2645 {
2646 int found;
2647
2648 if (targetdebug)
2649 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2650 hex_string (start_addr));
2651
2652 found = current_target.to_search_memory (&current_target, start_addr,
2653 search_space_len,
2654 pattern, pattern_len, found_addrp);
2655
2656 if (targetdebug)
2657 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2658
2659 return found;
2660 }
2661
2662 /* Look through the currently pushed targets. If none of them will
2663 be able to restart the currently running process, issue an error
2664 message. */
2665
2666 void
2667 target_require_runnable (void)
2668 {
2669 struct target_ops *t;
2670
2671 for (t = target_stack; t != NULL; t = t->beneath)
2672 {
2673 /* If this target knows how to create a new program, then
2674 assume we will still be able to after killing the current
2675 one. Either killing and mourning will not pop T, or else
2676 find_default_run_target will find it again. */
2677 if (t->to_create_inferior != NULL)
2678 return;
2679
2680 /* Do not worry about thread_stratum targets that can not
2681 create inferiors. Assume they will be pushed again if
2682 necessary, and continue to the process_stratum. */
2683 if (t->to_stratum == thread_stratum
2684 || t->to_stratum == arch_stratum)
2685 continue;
2686
2687 error (_("The \"%s\" target does not support \"run\". "
2688 "Try \"help target\" or \"continue\"."),
2689 t->to_shortname);
2690 }
2691
2692 /* This function is only called if the target is running. In that
2693 case there should have been a process_stratum target and it
2694 should either know how to create inferiors, or not... */
2695 internal_error (__FILE__, __LINE__, _("No targets found"));
2696 }
2697
2698 /* Look through the list of possible targets for a target that can
2699 execute a run or attach command without any other data. This is
2700 used to locate the default process stratum.
2701
2702 If DO_MESG is not NULL, the result is always valid (error() is
2703 called for errors); else, return NULL on error. */
2704
2705 static struct target_ops *
2706 find_default_run_target (char *do_mesg)
2707 {
2708 struct target_ops **t;
2709 struct target_ops *runable = NULL;
2710 int count;
2711
2712 count = 0;
2713
2714 for (t = target_structs; t < target_structs + target_struct_size;
2715 ++t)
2716 {
2717 if ((*t)->to_can_run != delegate_can_run && target_can_run (*t))
2718 {
2719 runable = *t;
2720 ++count;
2721 }
2722 }
2723
2724 if (count != 1)
2725 {
2726 if (do_mesg)
2727 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2728 else
2729 return NULL;
2730 }
2731
2732 return runable;
2733 }
2734
2735 void
2736 find_default_attach (struct target_ops *ops, char *args, int from_tty)
2737 {
2738 struct target_ops *t;
2739
2740 t = find_default_run_target ("attach");
2741 (t->to_attach) (t, args, from_tty);
2742 return;
2743 }
2744
2745 void
2746 find_default_create_inferior (struct target_ops *ops,
2747 char *exec_file, char *allargs, char **env,
2748 int from_tty)
2749 {
2750 struct target_ops *t;
2751
2752 t = find_default_run_target ("run");
2753 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
2754 return;
2755 }
2756
2757 static int
2758 find_default_can_async_p (struct target_ops *ignore)
2759 {
2760 struct target_ops *t;
2761
2762 /* This may be called before the target is pushed on the stack;
2763 look for the default process stratum. If there's none, gdb isn't
2764 configured with a native debugger, and target remote isn't
2765 connected yet. */
2766 t = find_default_run_target (NULL);
2767 if (t && t->to_can_async_p != delegate_can_async_p)
2768 return (t->to_can_async_p) (t);
2769 return 0;
2770 }
2771
2772 static int
2773 find_default_is_async_p (struct target_ops *ignore)
2774 {
2775 struct target_ops *t;
2776
2777 /* This may be called before the target is pushed on the stack;
2778 look for the default process stratum. If there's none, gdb isn't
2779 configured with a native debugger, and target remote isn't
2780 connected yet. */
2781 t = find_default_run_target (NULL);
2782 if (t && t->to_is_async_p != delegate_is_async_p)
2783 return (t->to_is_async_p) (t);
2784 return 0;
2785 }
2786
2787 static int
2788 find_default_supports_non_stop (struct target_ops *self)
2789 {
2790 struct target_ops *t;
2791
2792 t = find_default_run_target (NULL);
2793 if (t && t->to_supports_non_stop)
2794 return (t->to_supports_non_stop) (t);
2795 return 0;
2796 }
2797
2798 int
2799 target_supports_non_stop (void)
2800 {
2801 struct target_ops *t;
2802
2803 for (t = &current_target; t != NULL; t = t->beneath)
2804 if (t->to_supports_non_stop)
2805 return t->to_supports_non_stop (t);
2806
2807 return 0;
2808 }
2809
2810 /* Implement the "info proc" command. */
2811
2812 int
2813 target_info_proc (char *args, enum info_proc_what what)
2814 {
2815 struct target_ops *t;
2816
2817 /* If we're already connected to something that can get us OS
2818 related data, use it. Otherwise, try using the native
2819 target. */
2820 if (current_target.to_stratum >= process_stratum)
2821 t = current_target.beneath;
2822 else
2823 t = find_default_run_target (NULL);
2824
2825 for (; t != NULL; t = t->beneath)
2826 {
2827 if (t->to_info_proc != NULL)
2828 {
2829 t->to_info_proc (t, args, what);
2830
2831 if (targetdebug)
2832 fprintf_unfiltered (gdb_stdlog,
2833 "target_info_proc (\"%s\", %d)\n", args, what);
2834
2835 return 1;
2836 }
2837 }
2838
2839 return 0;
2840 }
2841
2842 static int
2843 find_default_supports_disable_randomization (struct target_ops *self)
2844 {
2845 struct target_ops *t;
2846
2847 t = find_default_run_target (NULL);
2848 if (t && t->to_supports_disable_randomization)
2849 return (t->to_supports_disable_randomization) (t);
2850 return 0;
2851 }
2852
2853 int
2854 target_supports_disable_randomization (void)
2855 {
2856 struct target_ops *t;
2857
2858 for (t = &current_target; t != NULL; t = t->beneath)
2859 if (t->to_supports_disable_randomization)
2860 return t->to_supports_disable_randomization (t);
2861
2862 return 0;
2863 }
2864
2865 char *
2866 target_get_osdata (const char *type)
2867 {
2868 struct target_ops *t;
2869
2870 /* If we're already connected to something that can get us OS
2871 related data, use it. Otherwise, try using the native
2872 target. */
2873 if (current_target.to_stratum >= process_stratum)
2874 t = current_target.beneath;
2875 else
2876 t = find_default_run_target ("get OS data");
2877
2878 if (!t)
2879 return NULL;
2880
2881 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
2882 }
2883
2884 /* Determine the current address space of thread PTID. */
2885
2886 struct address_space *
2887 target_thread_address_space (ptid_t ptid)
2888 {
2889 struct address_space *aspace;
2890 struct inferior *inf;
2891 struct target_ops *t;
2892
2893 for (t = current_target.beneath; t != NULL; t = t->beneath)
2894 {
2895 if (t->to_thread_address_space != NULL)
2896 {
2897 aspace = t->to_thread_address_space (t, ptid);
2898 gdb_assert (aspace);
2899
2900 if (targetdebug)
2901 fprintf_unfiltered (gdb_stdlog,
2902 "target_thread_address_space (%s) = %d\n",
2903 target_pid_to_str (ptid),
2904 address_space_num (aspace));
2905 return aspace;
2906 }
2907 }
2908
2909 /* Fall-back to the "main" address space of the inferior. */
2910 inf = find_inferior_pid (ptid_get_pid (ptid));
2911
2912 if (inf == NULL || inf->aspace == NULL)
2913 internal_error (__FILE__, __LINE__,
2914 _("Can't determine the current "
2915 "address space of thread %s\n"),
2916 target_pid_to_str (ptid));
2917
2918 return inf->aspace;
2919 }
2920
2921
2922 /* Target file operations. */
2923
2924 static struct target_ops *
2925 default_fileio_target (void)
2926 {
2927 /* If we're already connected to something that can perform
2928 file I/O, use it. Otherwise, try using the native target. */
2929 if (current_target.to_stratum >= process_stratum)
2930 return current_target.beneath;
2931 else
2932 return find_default_run_target ("file I/O");
2933 }
2934
2935 /* Open FILENAME on the target, using FLAGS and MODE. Return a
2936 target file descriptor, or -1 if an error occurs (and set
2937 *TARGET_ERRNO). */
2938 int
2939 target_fileio_open (const char *filename, int flags, int mode,
2940 int *target_errno)
2941 {
2942 struct target_ops *t;
2943
2944 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2945 {
2946 if (t->to_fileio_open != NULL)
2947 {
2948 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
2949
2950 if (targetdebug)
2951 fprintf_unfiltered (gdb_stdlog,
2952 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
2953 filename, flags, mode,
2954 fd, fd != -1 ? 0 : *target_errno);
2955 return fd;
2956 }
2957 }
2958
2959 *target_errno = FILEIO_ENOSYS;
2960 return -1;
2961 }
2962
2963 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
2964 Return the number of bytes written, or -1 if an error occurs
2965 (and set *TARGET_ERRNO). */
2966 int
2967 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
2968 ULONGEST offset, int *target_errno)
2969 {
2970 struct target_ops *t;
2971
2972 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2973 {
2974 if (t->to_fileio_pwrite != NULL)
2975 {
2976 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
2977 target_errno);
2978
2979 if (targetdebug)
2980 fprintf_unfiltered (gdb_stdlog,
2981 "target_fileio_pwrite (%d,...,%d,%s) "
2982 "= %d (%d)\n",
2983 fd, len, pulongest (offset),
2984 ret, ret != -1 ? 0 : *target_errno);
2985 return ret;
2986 }
2987 }
2988
2989 *target_errno = FILEIO_ENOSYS;
2990 return -1;
2991 }
2992
2993 /* Read up to LEN bytes FD on the target into READ_BUF.
2994 Return the number of bytes read, or -1 if an error occurs
2995 (and set *TARGET_ERRNO). */
2996 int
2997 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
2998 ULONGEST offset, int *target_errno)
2999 {
3000 struct target_ops *t;
3001
3002 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3003 {
3004 if (t->to_fileio_pread != NULL)
3005 {
3006 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
3007 target_errno);
3008
3009 if (targetdebug)
3010 fprintf_unfiltered (gdb_stdlog,
3011 "target_fileio_pread (%d,...,%d,%s) "
3012 "= %d (%d)\n",
3013 fd, len, pulongest (offset),
3014 ret, ret != -1 ? 0 : *target_errno);
3015 return ret;
3016 }
3017 }
3018
3019 *target_errno = FILEIO_ENOSYS;
3020 return -1;
3021 }
3022
3023 /* Close FD on the target. Return 0, or -1 if an error occurs
3024 (and set *TARGET_ERRNO). */
3025 int
3026 target_fileio_close (int fd, int *target_errno)
3027 {
3028 struct target_ops *t;
3029
3030 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3031 {
3032 if (t->to_fileio_close != NULL)
3033 {
3034 int ret = t->to_fileio_close (t, fd, target_errno);
3035
3036 if (targetdebug)
3037 fprintf_unfiltered (gdb_stdlog,
3038 "target_fileio_close (%d) = %d (%d)\n",
3039 fd, ret, ret != -1 ? 0 : *target_errno);
3040 return ret;
3041 }
3042 }
3043
3044 *target_errno = FILEIO_ENOSYS;
3045 return -1;
3046 }
3047
3048 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3049 occurs (and set *TARGET_ERRNO). */
3050 int
3051 target_fileio_unlink (const char *filename, int *target_errno)
3052 {
3053 struct target_ops *t;
3054
3055 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3056 {
3057 if (t->to_fileio_unlink != NULL)
3058 {
3059 int ret = t->to_fileio_unlink (t, filename, target_errno);
3060
3061 if (targetdebug)
3062 fprintf_unfiltered (gdb_stdlog,
3063 "target_fileio_unlink (%s) = %d (%d)\n",
3064 filename, ret, ret != -1 ? 0 : *target_errno);
3065 return ret;
3066 }
3067 }
3068
3069 *target_errno = FILEIO_ENOSYS;
3070 return -1;
3071 }
3072
3073 /* Read value of symbolic link FILENAME on the target. Return a
3074 null-terminated string allocated via xmalloc, or NULL if an error
3075 occurs (and set *TARGET_ERRNO). */
3076 char *
3077 target_fileio_readlink (const char *filename, int *target_errno)
3078 {
3079 struct target_ops *t;
3080
3081 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3082 {
3083 if (t->to_fileio_readlink != NULL)
3084 {
3085 char *ret = t->to_fileio_readlink (t, filename, target_errno);
3086
3087 if (targetdebug)
3088 fprintf_unfiltered (gdb_stdlog,
3089 "target_fileio_readlink (%s) = %s (%d)\n",
3090 filename, ret? ret : "(nil)",
3091 ret? 0 : *target_errno);
3092 return ret;
3093 }
3094 }
3095
3096 *target_errno = FILEIO_ENOSYS;
3097 return NULL;
3098 }
3099
/* Cleanup routine: close the target file descriptor pointed to by
   OPAQUE, ignoring any error (best effort during unwinding).  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int ignored_errno;

  target_fileio_close (*(int *) opaque, &ignored_errno);
}
3108
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.

   Returns -1 on error; on success *BUF_P is set only when at least
   one byte was transferred (a zero-length file returns 0 with *BUF_P
   untouched).  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;	/* Buffer capacity and fill level.  */
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Make sure the descriptor is closed on every exit path, including
     errors thrown out of the reads below.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Fill the tail of the buffer, always leaving PADDING spare
	 bytes at the end for the caller.  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      /* Allow the user to interrupt a long read.  */
      QUIT;
    }
}
3172
3173 /* Read target file FILENAME. Store the result in *BUF_P and return
3174 the size of the transferred data. See the declaration in "target.h"
3175 function for more information about the return value. */
3176
3177 LONGEST
3178 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3179 {
3180 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3181 }
3182
3183 /* Read target file FILENAME. The result is NUL-terminated and
3184 returned as a string, allocated using xmalloc. If an error occurs
3185 or the transfer is unsupported, NULL is returned. Empty objects
3186 are returned as allocated but empty strings. A warning is issued
3187 if the result contains any embedded NUL bytes. */
3188
3189 char *
3190 target_fileio_read_stralloc (const char *filename)
3191 {
3192 gdb_byte *buffer;
3193 char *bufstr;
3194 LONGEST i, transferred;
3195
3196 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3197 bufstr = (char *) buffer;
3198
3199 if (transferred < 0)
3200 return NULL;
3201
3202 if (transferred == 0)
3203 return xstrdup ("");
3204
3205 bufstr[transferred] = 0;
3206
3207 /* Check for embedded NUL bytes; but allow trailing NULs. */
3208 for (i = strlen (bufstr); i < transferred; i++)
3209 if (bufstr[i] != 0)
3210 {
3211 warning (_("target file %s "
3212 "contained unexpected null characters"),
3213 filename);
3214 break;
3215 }
3216
3217 return bufstr;
3218 }
3219
3220
3221 static int
3222 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3223 CORE_ADDR addr, int len)
3224 {
3225 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3226 }
3227
3228 static int
3229 default_watchpoint_addr_within_range (struct target_ops *target,
3230 CORE_ADDR addr,
3231 CORE_ADDR start, int length)
3232 {
3233 return addr >= start && addr < start + length;
3234 }
3235
3236 static struct gdbarch *
3237 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3238 {
3239 return target_gdbarch ();
3240 }
3241
/* Generic predicate default that always answers "no".  */

static int
return_zero (struct target_ops *ignore)
{
  return 0;
}
3247
3248 static int
3249 return_zero_has_execution (struct target_ops *ignore, ptid_t ignore2)
3250 {
3251 return 0;
3252 }
3253
3254 /*
3255 * Find the next target down the stack from the specified target.
3256 */
3257
3258 struct target_ops *
3259 find_target_beneath (struct target_ops *t)
3260 {
3261 return t->beneath;
3262 }
3263
3264 /* See target.h. */
3265
3266 struct target_ops *
3267 find_target_at (enum strata stratum)
3268 {
3269 struct target_ops *t;
3270
3271 for (t = current_target.beneath; t != NULL; t = t->beneath)
3272 if (t->to_stratum == stratum)
3273 return t;
3274
3275 return NULL;
3276 }
3277
3278 \f
/* The inferior process has died.  Long live the inferior!  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Snapshot and clear the current ptid before tearing anything
     down.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  /* Cached register values no longer describe anything.  */
  registers_changed ();

  /* The executable on disk may have changed while it ran.  */
  reopen_exec_file ();
  reinit_frame_cache ();

  /* Give the (deprecated) user hook a chance to run.  */
  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3313 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer, so callers must consume the result before the next
   call overwrites it.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}

/* Default implementation of to_pid_to_str, delegating to
   normal_pid_to_str.  */

static char *
default_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3331
/* Error-catcher for target_find_memory_regions: installed when no
   real target provides an implementation.  */

static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  return 0;
}

/* Error-catcher for target_make_corefile_notes: installed when no
   real target provides an implementation.  */

static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  return NULL;
}
3349
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  /* Creating an inferior is still possible; defer to the
     find_default_* routines.  */
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_stratum = dummy_stratum;
  /* The dummy target has nothing: no memory, no stack, no registers,
     no execution.  */
  dummy_target.to_has_all_memory = return_zero;
  dummy_target.to_has_memory = return_zero;
  dummy_target.to_has_stack = return_zero;
  dummy_target.to_has_registers = return_zero;
  dummy_target.to_has_execution = return_zero_has_execution;
  dummy_target.to_magic = OPS_MAGIC;

  /* The remaining slots are filled in by install_dummy_methods.  */
  install_dummy_methods (&dummy_target);
}
3373 \f
/* Debug wrapper for to_open: call the saved real method, then log
   the call to gdb_stdlog.  */

static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  /* NOTE(review): ARGS may be NULL here; printing it with %s relies
     on the host printf tolerating a null pointer -- confirm.  */
  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}
3381
/* Close the target TARG.  TARG must already have been unpushed from
   the target stack.  */

void
target_close (struct target_ops *targ)
{
  gdb_assert (!target_is_pushed (targ));

  /* to_xclose takes precedence over to_close when both are set
     (presumably to_xclose also destroys TARG itself -- see
     target.h).  */
  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3395
/* Attach via the current target's to_attach method, logging the call
   when target debugging is enabled.  */

void
target_attach (char *args, int from_tty)
{
  current_target.to_attach (&current_target, args, from_tty);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
			args, from_tty);
}
3404
3405 int
3406 target_thread_alive (ptid_t ptid)
3407 {
3408 int retval;
3409
3410 retval = current_target.to_thread_alive (&current_target, ptid);
3411 if (targetdebug)
3412 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3413 ptid_get_pid (ptid), retval);
3414
3415 return retval;
3416 }
3417
/* Ask the current target to discover threads via its
   to_find_new_threads method, logging when target debugging is
   enabled.  */

void
target_find_new_threads (void)
{
  current_target.to_find_new_threads (&current_target);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
}
3425
3426 void
3427 target_stop (ptid_t ptid)
3428 {
3429 if (!may_stop)
3430 {
3431 warning (_("May not interrupt or stop the target, ignoring attempt"));
3432 return;
3433 }
3434
3435 (*current_target.to_stop) (&current_target, ptid);
3436 }
3437
/* Debug wrapper for to_post_attach: delegate to the saved real
   method, then log the call.  */

static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3445
3446 /* Concatenate ELEM to LIST, a comma separate list, and return the
3447 result. The LIST incoming argument is released. */
3448
3449 static char *
3450 str_comma_list_concat_elem (char *list, const char *elem)
3451 {
3452 if (list == NULL)
3453 return xstrdup (elem);
3454 else
3455 return reconcat (list, list, ", ", elem, (char *) NULL);
3456 }
3457
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET
   and clear OPT from TARGET_OPTIONS.  Returns the new resulting
   string.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  if ((*target_options & opt) == 0)
    return ret;

  *target_options &= ~opt;
  return str_comma_list_concat_elem (ret, opt_str);
}
3475
/* Render TARGET_OPTIONS (a mask of TARGET_* bits) as a
   comma-separated, heap-allocated string; the caller owns the
   result.  */

char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

  /* do_option clears each recognized bit from target_options as it
     renders it.  */
#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  /* Any bits still set were not recognized above.  */
  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3493
/* Log a register access on behalf of FUNC to gdb_stdlog: the name
   (or number) of REGNO, its raw bytes from REGCACHE, and, when the
   register fits in a LONGEST, its value in hex and decimal.  */

static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Print the register's name when it has a non-empty one, else its
     number.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      /* Dump the raw bytes in target byte order.  */
      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3530
/* Fetch register REGNO into REGCACHE via the current target's
   to_fetch_registers, logging when target debugging is enabled.  */

void
target_fetch_registers (struct regcache *regcache, int regno)
{
  current_target.to_fetch_registers (&current_target, regcache, regno);
  if (targetdebug)
    debug_print_register ("target_fetch_registers", regcache, regno);
}
3538
3539 void
3540 target_store_registers (struct regcache *regcache, int regno)
3541 {
3542 struct target_ops *t;
3543
3544 if (!may_write_registers)
3545 error (_("Writing to registers is not allowed (regno %d)"), regno);
3546
3547 current_target.to_store_registers (&current_target, regcache, regno);
3548 if (targetdebug)
3549 {
3550 debug_print_register ("target_store_registers", regcache, regno);
3551 }
3552 }
3553
/* Delegate to the current target's to_core_of_thread for PTID (see
   target.h for the contract), logging when target debugging is
   enabled.  */

int
target_core_of_thread (ptid_t ptid)
{
  int retval = current_target.to_core_of_thread (&current_target, ptid);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog,
			"target_core_of_thread (%d) = %d\n",
			ptid_get_pid (ptid), retval);
  return retval;
}
3565
/* Delegate to the current target's to_verify_memory for SIZE bytes
   of DATA at MEMADDR (see target.h for the contract), logging when
   target debugging is enabled.  */

int
target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  int retval = current_target.to_verify_memory (&current_target,
						data, memaddr, size);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog,
			"target_verify_memory (%s, %s) = %d\n",
			paddress (target_gdbarch (), memaddr),
			pulongest (size),
			retval);
  return retval;
}
3580
3581 /* The documentation for this function is in its prototype declaration in
3582 target.h. */
3583
3584 int
3585 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3586 {
3587 int ret;
3588
3589 ret = current_target.to_insert_mask_watchpoint (&current_target,
3590 addr, mask, rw);
3591
3592 if (targetdebug)
3593 fprintf_unfiltered (gdb_stdlog, "\
3594 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
3595 core_addr_to_string (addr),
3596 core_addr_to_string (mask), rw, ret);
3597
3598 return ret;
3599 }
3600
3601 /* The documentation for this function is in its prototype declaration in
3602 target.h. */
3603
3604 int
3605 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3606 {
3607 int ret;
3608
3609 ret = current_target.to_remove_mask_watchpoint (&current_target,
3610 addr, mask, rw);
3611
3612 if (targetdebug)
3613 fprintf_unfiltered (gdb_stdlog, "\
3614 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
3615 core_addr_to_string (addr),
3616 core_addr_to_string (mask), rw, ret);
3617
3618 return ret;
3619 }
3620
/* The documentation for this function is in its prototype
   declaration in target.h.  */

int
target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
{
  return current_target.to_masked_watch_num_registers (&current_target,
						       addr, mask);
}

/* The documentation for this function is in its prototype
   declaration in target.h.  */

int
target_ranged_break_num_registers (void)
{
  return current_target.to_ranged_break_num_registers (&current_target);
}
3639
/* The wrappers below are thin delegators: each simply forwards to
   the corresponding to_* method of the current target.  Their
   contracts are documented in target.h.  */

/* See target.h.  */

struct btrace_target_info *
target_enable_btrace (ptid_t ptid)
{
  return current_target.to_enable_btrace (&current_target, ptid);
}

/* See target.h.  */

void
target_disable_btrace (struct btrace_target_info *btinfo)
{
  current_target.to_disable_btrace (&current_target, btinfo);
}

/* See target.h.  */

void
target_teardown_btrace (struct btrace_target_info *btinfo)
{
  current_target.to_teardown_btrace (&current_target, btinfo);
}

/* See target.h.  */

enum btrace_error
target_read_btrace (VEC (btrace_block_s) **btrace,
		    struct btrace_target_info *btinfo,
		    enum btrace_read_type type)
{
  return current_target.to_read_btrace (&current_target, btrace, btinfo, type);
}

/* See target.h.  */

void
target_stop_recording (void)
{
  current_target.to_stop_recording (&current_target);
}

/* See target.h.  */

void
target_info_record (void)
{
  struct target_ops *t;

  /* Unlike the plain delegators around it, this walks the target
     stack for the first target implementing to_info_record, and
     complains if none does.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_info_record != NULL)
      {
	t->to_info_record (t);
	return;
      }

  tcomplain ();
}

/* See target.h.  */

void
target_save_record (const char *filename)
{
  current_target.to_save_record (&current_target, filename);
}

/* See target.h.  */

int
target_supports_delete_record (void)
{
  struct target_ops *t;

  /* True iff some target on the stack implements to_delete_record.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_delete_record != NULL)
      return 1;

  return 0;
}

/* See target.h.  */

void
target_delete_record (void)
{
  current_target.to_delete_record (&current_target);
}

/* See target.h.  */

int
target_record_is_replaying (void)
{
  return current_target.to_record_is_replaying (&current_target);
}

/* See target.h.  */

void
target_goto_record_begin (void)
{
  current_target.to_goto_record_begin (&current_target);
}

/* See target.h.  */

void
target_goto_record_end (void)
{
  current_target.to_goto_record_end (&current_target);
}

/* See target.h.  */

void
target_goto_record (ULONGEST insn)
{
  current_target.to_goto_record (&current_target, insn);
}

/* See target.h.  */

void
target_insn_history (int size, int flags)
{
  current_target.to_insn_history (&current_target, size, flags);
}

/* See target.h.  */

void
target_insn_history_from (ULONGEST from, int size, int flags)
{
  current_target.to_insn_history_from (&current_target, from, size, flags);
}

/* See target.h.  */

void
target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  current_target.to_insn_history_range (&current_target, begin, end, flags);
}

/* See target.h.  */

void
target_call_history (int size, int flags)
{
  current_target.to_call_history (&current_target, size, flags);
}

/* See target.h.  */

void
target_call_history_from (ULONGEST begin, int size, int flags)
{
  current_target.to_call_history_from (&current_target, begin, size, flags);
}

/* See target.h.  */

void
target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  current_target.to_call_history_range (&current_target, begin, end, flags);
}
3808
/* Debug wrapper for to_prepare_to_store: delegate to the saved real
   method, then log the call.  */

static void
debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
{
  debug_target.to_prepare_to_store (&debug_target, regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
3816
3817 /* See target.h. */
3818
3819 const struct frame_unwind *
3820 target_get_unwinder (void)
3821 {
3822 struct target_ops *t;
3823
3824 for (t = current_target.beneath; t != NULL; t = t->beneath)
3825 if (t->to_get_unwinder != NULL)
3826 return t->to_get_unwinder;
3827
3828 return NULL;
3829 }
3830
3831 /* See target.h. */
3832
3833 const struct frame_unwind *
3834 target_get_tailcall_unwinder (void)
3835 {
3836 struct target_ops *t;
3837
3838 for (t = current_target.beneath; t != NULL; t = t->beneath)
3839 if (t->to_get_tailcall_unwinder != NULL)
3840 return t->to_get_tailcall_unwinder;
3841
3842 return NULL;
3843 }
3844
/* See target.h.  */

CORE_ADDR
forward_target_decr_pc_after_break (struct target_ops *ops,
				    struct gdbarch *gdbarch)
{
  /* Walk the stack starting at OPS for a target that overrides the
     post-breakpoint PC adjustment; fall back on the architecture's
     value.  */
  for (; ops != NULL; ops = ops->beneath)
    if (ops->to_decr_pc_after_break != NULL)
      return ops->to_decr_pc_after_break (ops, gdbarch);

  return gdbarch_decr_pc_after_break (gdbarch);
}

/* See target.h.  */

CORE_ADDR
target_decr_pc_after_break (struct gdbarch *gdbarch)
{
  return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
}
3865
/* Debug wrapper around the deprecated memory transfer method:
   perform the transfer via the saved real method, then log the
   request and (up to a verbosity-dependent limit) the bytes
   moved.  */

static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the line on every 16-byte boundary of the host
	     buffer address.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* At verbosity below 2, stop after the first line.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
3906
/* Debug wrapper for to_files_info.  */

static void
debug_to_files_info (struct target_ops *target)
{
  /* NOTE(review): unlike most wrappers in this file, this passes the
     incoming TARGET through instead of &debug_target -- confirm this
     is intentional.  */
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
3914
3915 static int
3916 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
3917 struct bp_target_info *bp_tgt)
3918 {
3919 int retval;
3920
3921 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
3922
3923 fprintf_unfiltered (gdb_stdlog,
3924 "target_insert_breakpoint (%s, xxx) = %ld\n",
3925 core_addr_to_string (bp_tgt->placed_address),
3926 (unsigned long) retval);
3927 return retval;
3928 }
3929
3930 static int
3931 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
3932 struct bp_target_info *bp_tgt)
3933 {
3934 int retval;
3935
3936 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
3937
3938 fprintf_unfiltered (gdb_stdlog,
3939 "target_remove_breakpoint (%s, xxx) = %ld\n",
3940 core_addr_to_string (bp_tgt->placed_address),
3941 (unsigned long) retval);
3942 return retval;
3943 }
3944
3945 static int
3946 debug_to_can_use_hw_breakpoint (struct target_ops *self,
3947 int type, int cnt, int from_tty)
3948 {
3949 int retval;
3950
3951 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
3952 type, cnt, from_tty);
3953
3954 fprintf_unfiltered (gdb_stdlog,
3955 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
3956 (unsigned long) type,
3957 (unsigned long) cnt,
3958 (unsigned long) from_tty,
3959 (unsigned long) retval);
3960 return retval;
3961 }
3962
3963 static int
3964 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
3965 CORE_ADDR addr, int len)
3966 {
3967 CORE_ADDR retval;
3968
3969 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
3970 addr, len);
3971
3972 fprintf_unfiltered (gdb_stdlog,
3973 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
3974 core_addr_to_string (addr), (unsigned long) len,
3975 core_addr_to_string (retval));
3976 return retval;
3977 }
3978
/* Debug wrapper for to_can_accel_watchpoint_condition: delegate to
   the saved real method, then log the arguments and the result.  */

static int
debug_to_can_accel_watchpoint_condition (struct target_ops *self,
					 CORE_ADDR addr, int len, int rw,
					 struct expression *cond)
{
  int retval;

  retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
							   addr, len,
							   rw, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_can_accel_watchpoint_condition "
		      "(%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, rw,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
3997
/* Debug wrapper for to_stopped_by_watchpoint: delegate to the saved
   real method, then log the result.  */

static int
debug_to_stopped_by_watchpoint (struct target_ops *ops)
{
  int retval;

  retval = debug_target.to_stopped_by_watchpoint (&debug_target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_by_watchpoint () = %ld\n",
		      (unsigned long) retval);
  return retval;
}
4010
4011 static int
4012 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4013 {
4014 int retval;
4015
4016 retval = debug_target.to_stopped_data_address (target, addr);
4017
4018 fprintf_unfiltered (gdb_stdlog,
4019 "target_stopped_data_address ([%s]) = %ld\n",
4020 core_addr_to_string (*addr),
4021 (unsigned long)retval);
4022 return retval;
4023 }
4024
4025 static int
4026 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4027 CORE_ADDR addr,
4028 CORE_ADDR start, int length)
4029 {
4030 int retval;
4031
4032 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4033 start, length);
4034
4035 fprintf_filtered (gdb_stdlog,
4036 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4037 core_addr_to_string (addr), core_addr_to_string (start),
4038 length, retval);
4039 return retval;
4040 }
4041
/* Debug wrapper for to_insert_hw_breakpoint: delegate to the saved
   real method, then log the placed address and the result.  */

static int
debug_to_insert_hw_breakpoint (struct target_ops *self,
			       struct gdbarch *gdbarch,
			       struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_hw_breakpoint (&debug_target,
						 gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}

/* Debug wrapper for to_remove_hw_breakpoint: delegate to the saved
   real method, then log the placed address and the result.  */

static int
debug_to_remove_hw_breakpoint (struct target_ops *self,
			       struct gdbarch *gdbarch,
			       struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_hw_breakpoint (&debug_target,
						 gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4075
/* Debug wrapper for to_insert_watchpoint: delegate to the saved real
   method, then log the arguments and the result.  */

static int
debug_to_insert_watchpoint (struct target_ops *self,
			    CORE_ADDR addr, int len, int type,
			    struct expression *cond)
{
  int retval;

  retval = debug_target.to_insert_watchpoint (&debug_target,
					      addr, len, type, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, type,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}

/* Debug wrapper for to_remove_watchpoint: delegate to the saved real
   method, then log the arguments and the result.  */

static int
debug_to_remove_watchpoint (struct target_ops *self,
			    CORE_ADDR addr, int len, int type,
			    struct expression *cond)
{
  int retval;

  retval = debug_target.to_remove_watchpoint (&debug_target,
					      addr, len, type, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, type,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4109
/* Debug wrappers for the terminal-handling, load and post-startup
   methods: each delegates to the saved real method and then logs the
   call to gdb_stdlog.  */

static void
debug_to_terminal_init (struct target_ops *self)
{
  debug_target.to_terminal_init (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}

static void
debug_to_terminal_inferior (struct target_ops *self)
{
  debug_target.to_terminal_inferior (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}

static void
debug_to_terminal_ours_for_output (struct target_ops *self)
{
  debug_target.to_terminal_ours_for_output (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}

static void
debug_to_terminal_ours (struct target_ops *self)
{
  debug_target.to_terminal_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}

static void
debug_to_terminal_save_ours (struct target_ops *self)
{
  debug_target.to_terminal_save_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}

static void
debug_to_terminal_info (struct target_ops *self,
			const char *arg, int from_tty)
{
  debug_target.to_terminal_info (&debug_target, arg, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
		      from_tty);
}

static void
debug_to_load (struct target_ops *self, char *args, int from_tty)
{
  debug_target.to_load (&debug_target, args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}

static void
debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_post_startup_inferior (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      ptid_get_pid (ptid));
}
4176
/* Debug wrappers for the fork/vfork/exec catchpoint methods: each
   delegates to the saved real method, then logs the PID and the
   result.  */

static int
debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}

static int
debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}

static int
debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}

static int
debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}

static int
debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}

static int
debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4254
/* Debug wrapper for to_has_exited: delegate to the saved real
   method, then log the arguments and the result.  */

static int
debug_to_has_exited (struct target_ops *self,
		     int pid, int wait_status, int *exit_status)
{
  int has_exited;

  has_exited = debug_target.to_has_exited (&debug_target,
					   pid, wait_status, exit_status);

  /* NOTE(review): this reads *EXIT_STATUS even when HAS_EXITED is
     false; assumes the method always writes it -- confirm.  */
  fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
		      pid, wait_status, *exit_status, has_exited);

  return has_exited;
}

/* Debug wrapper for to_can_run: delegate to the saved real method,
   then log the result.  */

static int
debug_to_can_run (struct target_ops *self)
{
  int retval;

  retval = debug_target.to_can_run (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);

  return retval;
}
4281
/* Debug wrapper for to_thread_architecture: delegate to the saved
   real method, then log PTID and the resulting architecture.  */

static struct gdbarch *
debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  struct gdbarch *retval;

  /* Note: passes OPS through rather than &debug_target, unlike most
     wrappers in this file.  */
  retval = debug_target.to_thread_architecture (ops, ptid);

  fprintf_unfiltered (gdb_stdlog,
		      "target_thread_architecture (%s) = %s [%s]\n",
		      target_pid_to_str (ptid),
		      host_address_to_string (retval),
		      gdbarch_bfd_arch_info (retval)->printable_name);
  return retval;
}
4296
/* Debug wrapper for to_stop: delegate to the saved real method, then
   log the call.  */

static void
debug_to_stop (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_stop (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
		      target_pid_to_str (ptid));
}

/* Debug wrapper for to_rcmd: delegate to the saved real method, then
   log the command.  */

static void
debug_to_rcmd (struct target_ops *self, char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (&debug_target, command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
4313
/* Debug wrapper for to_pid_to_exec_file: delegate to the saved real
   method, then log PID and the resulting path.  */

static char *
debug_to_pid_to_exec_file (struct target_ops *self, int pid)
{
  char *exec_file;

  exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);

  /* NOTE(review): EXEC_FILE may be NULL; printing it with %s relies
     on the host printf tolerating a null pointer -- confirm.  */
  fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
		      pid, exec_file);

  return exec_file;
}
4326
/* Turn on target debugging: snapshot the current target vector into
   debug_target, then repoint selected current_target methods at the
   debug_to_* wrappers above, which delegate to the saved copy and
   log each call to gdb_stdlog.  */

static void
setup_target_debug (void)
{
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4374 \f
4375
/* Doc string for the target-stack info command; presumably
   referenced at command registration time, outside this excerpt --
   confirm.  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4380
/* Default implementation of to_rcmd: the "monitor" command is not
   supported.  */

static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
4386
/* Implementation of the "monitor" command: forward CMD to the
   target via target_rcmd, with output going to gdb_stdtarg.  */

static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  target_rcmd (cmd, gdb_stdtarg);
}
4393
/* Print the name of each layer of our target stack.  */

static void
maintenance_print_target_stack (char *cmd, int from_tty)
{
  struct target_ops *t;

  printf_filtered (_("The current target stack is:\n"));

  for (t = target_stack; t != NULL; t = t->beneath)
    {
      printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
    }
}
4408
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated; the change is
   committed (or reverted) by set_target_async_command below.  */
static int target_async_permitted_1 = 0;
4415
/* "set target-async" callback: commit the staged value to
   target_async_permitted, unless there is a live inferior.  */

static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Revert the staged value (which the "set" machinery already
	 wrote) before reporting the error.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
4428
/* The "show target-async" hook: print the current user-visible value
   of the setting, already formatted by the caller as VALUE.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
4438
/* Temporary copies of permission settings.  The "set may-*" commands
   write to these shadow variables; the corresponding set hooks copy
   them into the real may_* variables once the change is known to be
   allowed.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4447
/* Make the user-set values match the real values again.  Used to back
   out an attempted change that cannot be applied (see
   set_target_permissions), so that "show" keeps reporting the values
   actually in effect.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
4460
/* The one function handles (most of) the permission flags in the same
   way.  Set hook shared by the "set may-*" commands registered in
   initialize_targets.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Revert the user's edit to the shadow variables; error does
	 not return.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  Note that
     may_write_memory is deliberately not handled here: it has its own
     hook, set_write_memory_permission, so that it can be changed
     independently of observer mode.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
4482
/* Set memory write permission independently of observer mode.
   Unlike set_target_permissions, no check for a running inferior is
   made here.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real value match the user-changed value.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
4493
4494
/* Set up the initial (dummy) target and register all of target.c's
   commands and set/show variables.  Called once at startup.  */

void
initialize_targets (void)
{
  /* The dummy target sits at the bottom of the target stack, so there
     is always at least one target present.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are two names for the same
     command.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled.  Higher numbers are more\n\
verbose.  Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target.  This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
           _("Print the name of each layer of the internal target stack."),
           &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* The "set may-*" permission commands below all write to the *_1
     shadow variables; the set hooks commit them to the real may_*
     variables (see set_target_permissions and
     set_write_memory_permission above).  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}