/* Scraped from git.ipfire.org mirror of thirdparty/binutils-gdb.git,
   blob gdb/target.c; commit subject: "remove function casts from
   target.c".  */
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47 #include "auxv.h"
48
/* Forward declarations for functions defined later in this file.  */

static void target_info (char *, int);

static void default_terminal_info (struct target_ops *, const char *, int);

static int default_watchpoint_addr_within_range (struct target_ops *,
						 CORE_ADDR, CORE_ADDR, int);

static int default_region_ok_for_hw_watchpoint (struct target_ops *,
						CORE_ADDR, int);

static void default_rcmd (struct target_ops *, char *, struct ui_file *);

static ptid_t default_get_ada_task_ptid (struct target_ops *self,
					 long lwp, long tid);

static int default_follow_fork (struct target_ops *self, int follow_child,
				int detach_fork);

static void default_mourn_inferior (struct target_ops *self);

static int default_search_memory (struct target_ops *ops,
				  CORE_ADDR start_addr,
				  ULONGEST search_space_len,
				  const gdb_byte *pattern,
				  ULONGEST pattern_len,
				  CORE_ADDR *found_addrp);

static void tcomplain (void) ATTRIBUTE_NORETURN;

static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);

static int return_zero (struct target_ops *);

static int return_zero_has_execution (struct target_ops *, ptid_t);

void target_ignore (void);

static void target_command (char *, int);

static struct target_ops *find_default_run_target (char *);

static target_xfer_partial_ftype default_xfer_partial;

static struct gdbarch *default_thread_architecture (struct target_ops *ops,
						    ptid_t ptid);

static int dummy_find_memory_regions (struct target_ops *self,
				      find_memory_region_ftype ignore1,
				      void *ignore2);

static char *dummy_make_corefile_notes (struct target_ops *self,
					bfd *ignore1, int *ignore2);

static char *default_pid_to_str (struct target_ops *ops, ptid_t ptid);

static int find_default_can_async_p (struct target_ops *ignore);

static int find_default_is_async_p (struct target_ops *ignore);

static enum exec_direction_kind default_execution_direction
    (struct target_ops *self);

/* Generated target-method delegators and defaults; produced by
   make-target-delegates.  */
#include "target-delegates.c"

static void init_dummy_target (void);

/* Template target vector for target debugging output; NOTE(review):
   presumably installed by setup_target_debug when "set debug target"
   is enabled -- confirm in that function's definition.  */
static struct target_ops debug_target;

static void debug_to_open (char *, int);

static void debug_to_prepare_to_store (struct target_ops *self,
				       struct regcache *);

static void debug_to_files_info (struct target_ops *);

static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
				       struct bp_target_info *);

static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
				       struct bp_target_info *);

static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
					   int, int, int);

static int debug_to_insert_hw_breakpoint (struct target_ops *self,
					  struct gdbarch *,
					  struct bp_target_info *);

static int debug_to_remove_hw_breakpoint (struct target_ops *self,
					  struct gdbarch *,
					  struct bp_target_info *);

static int debug_to_insert_watchpoint (struct target_ops *self,
				       CORE_ADDR, int, int,
				       struct expression *);

static int debug_to_remove_watchpoint (struct target_ops *self,
				       CORE_ADDR, int, int,
				       struct expression *);

static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);

static int debug_to_watchpoint_addr_within_range (struct target_ops *,
						  CORE_ADDR, CORE_ADDR, int);

static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
						 CORE_ADDR, int);

static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
						    CORE_ADDR, int, int,
						    struct expression *);

static void debug_to_terminal_init (struct target_ops *self);

static void debug_to_terminal_inferior (struct target_ops *self);

static void debug_to_terminal_ours_for_output (struct target_ops *self);

static void debug_to_terminal_save_ours (struct target_ops *self);

static void debug_to_terminal_ours (struct target_ops *self);

static void debug_to_load (struct target_ops *self, char *, int);

static int debug_to_can_run (struct target_ops *self);

static void debug_to_stop (struct target_ops *self, ptid_t);
/* Pointer to array of target architecture structures; the size of the
   array; the current index into the array; the allocated size of the
   array.  */
struct target_ops **target_structs;
unsigned target_struct_size;
unsigned target_struct_allocsize;
/* Initial capacity of TARGET_STRUCTS; the array doubles when full.  */
#define DEFAULT_ALLOCSIZE 10

/* The initial current target, so that there is always a semi-valid
   current target.  */

static struct target_ops dummy_target;

/* Top of target stack.  */

static struct target_ops *target_stack;

/* The target structure we are currently using to talk to a process
   or file or whatever "inferior" we have.  */

struct target_ops current_target;

/* Command list for target.  */

static struct cmd_list_element *targetlist = NULL;

/* Nonzero if we should trust readonly sections from the
   executable when reading memory.  */

static int trust_readonly = 0;

/* Nonzero if we should show true memory content including
   memory breakpoint inserted by gdb.  */

static int show_memory_breakpoints = 0;

/* These globals control whether GDB attempts to perform these
   operations; they are useful for targets that need to prevent
   inadvertant disruption, such as in non-stop mode.  */

int may_write_registers = 1;

int may_write_memory = 1;

int may_insert_breakpoints = 1;

int may_insert_tracepoints = 1;

int may_insert_fast_tracepoints = 1;

int may_stop = 1;

/* Non-zero if we want to see trace of target level stuff.  */

static unsigned int targetdebug = 0;
/* Implement "show debug target": report the current setting VALUE.  */
static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
238
static void setup_target_debug (void);

/* The user just typed 'target' without the name of a target.  */

static void
target_command (char *arg, int from_tty)
{
  /* "target" alone is a prefix command; prompt for a subcommand.  */
  fputs_filtered ("Argument required (target name). Try `help target'\n",
		  gdb_stdout);
}
249
250 /* Default target_has_* methods for process_stratum targets. */
251
252 int
253 default_child_has_all_memory (struct target_ops *ops)
254 {
255 /* If no inferior selected, then we can't read memory here. */
256 if (ptid_equal (inferior_ptid, null_ptid))
257 return 0;
258
259 return 1;
260 }
261
262 int
263 default_child_has_memory (struct target_ops *ops)
264 {
265 /* If no inferior selected, then we can't read memory here. */
266 if (ptid_equal (inferior_ptid, null_ptid))
267 return 0;
268
269 return 1;
270 }
271
272 int
273 default_child_has_stack (struct target_ops *ops)
274 {
275 /* If no inferior selected, there's no stack. */
276 if (ptid_equal (inferior_ptid, null_ptid))
277 return 0;
278
279 return 1;
280 }
281
282 int
283 default_child_has_registers (struct target_ops *ops)
284 {
285 /* Can't read registers from no inferior. */
286 if (ptid_equal (inferior_ptid, null_ptid))
287 return 0;
288
289 return 1;
290 }
291
292 int
293 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
294 {
295 /* If there's no thread selected, then we can't make it run through
296 hoops. */
297 if (ptid_equal (the_ptid, null_ptid))
298 return 0;
299
300 return 1;
301 }
302
303
304 int
305 target_has_all_memory_1 (void)
306 {
307 struct target_ops *t;
308
309 for (t = current_target.beneath; t != NULL; t = t->beneath)
310 if (t->to_has_all_memory (t))
311 return 1;
312
313 return 0;
314 }
315
316 int
317 target_has_memory_1 (void)
318 {
319 struct target_ops *t;
320
321 for (t = current_target.beneath; t != NULL; t = t->beneath)
322 if (t->to_has_memory (t))
323 return 1;
324
325 return 0;
326 }
327
328 int
329 target_has_stack_1 (void)
330 {
331 struct target_ops *t;
332
333 for (t = current_target.beneath; t != NULL; t = t->beneath)
334 if (t->to_has_stack (t))
335 return 1;
336
337 return 0;
338 }
339
340 int
341 target_has_registers_1 (void)
342 {
343 struct target_ops *t;
344
345 for (t = current_target.beneath; t != NULL; t = t->beneath)
346 if (t->to_has_registers (t))
347 return 1;
348
349 return 0;
350 }
351
352 int
353 target_has_execution_1 (ptid_t the_ptid)
354 {
355 struct target_ops *t;
356
357 for (t = current_target.beneath; t != NULL; t = t->beneath)
358 if (t->to_has_execution (t, the_ptid))
359 return 1;
360
361 return 0;
362 }
363
/* Like target_has_execution_1, but for the currently selected thread
   (inferior_ptid).  */
int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
369
370 /* Complete initialization of T. This ensures that various fields in
371 T are set, if needed by the target implementation. */
372
void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* The to_has_* predicates conservatively answer "no" unless the
     target supplies its own implementation.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = return_zero_has_execution;

  /* Fill the remaining slots via the generated delegators (see
     target-delegates.c).  */
  install_delegators (t);
}
397
398 /* Add possible target architecture T to the list and add a new
399 command 'target T->to_shortname'. Set COMPLETER as the command's
400 completer if not NULL. */
401
402 void
403 add_target_with_completer (struct target_ops *t,
404 completer_ftype *completer)
405 {
406 struct cmd_list_element *c;
407
408 complete_target_initialization (t);
409
410 if (!target_structs)
411 {
412 target_struct_allocsize = DEFAULT_ALLOCSIZE;
413 target_structs = (struct target_ops **) xmalloc
414 (target_struct_allocsize * sizeof (*target_structs));
415 }
416 if (target_struct_size >= target_struct_allocsize)
417 {
418 target_struct_allocsize *= 2;
419 target_structs = (struct target_ops **)
420 xrealloc ((char *) target_structs,
421 target_struct_allocsize * sizeof (*target_structs));
422 }
423 target_structs[target_struct_size++] = t;
424
425 if (targetlist == NULL)
426 add_prefix_cmd ("target", class_run, target_command, _("\
427 Connect to a target machine or process.\n\
428 The first argument is the type or protocol of the target machine.\n\
429 Remaining arguments are interpreted by the target protocol. For more\n\
430 information on the arguments for a particular protocol, type\n\
431 `help target ' followed by the protocol name."),
432 &targetlist, "target ", 0, &cmdlist);
433 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
434 &targetlist);
435 if (completer != NULL)
436 set_cmd_completer (c, completer);
437 }
438
439 /* Add a possible target architecture to the list. */
440
/* Register target vector T with no command completer.  */
void
add_target (struct target_ops *t)
{
  add_target_with_completer (t, NULL);
}
446
447 /* See target.h. */
448
void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  /* NOTE(review): ALT is never freed here; it appears deprecate_cmd
     retains the string for later warning output -- confirm before
     treating this as a leak.  */
  alt = xstrprintf ("target %s", t->to_shortname);
  deprecate_cmd (c, alt);
}
461
462 /* Stub functions */
463
/* Deliberate no-op; used as a default for optional target methods.  */
void
target_ignore (void)
{
}
468
/* Kill the inferior via the current target's to_kill method, tracing
   the call when target debugging is on.  */
void
target_kill (void)
{
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");

  current_target.to_kill (&current_target);
}
477
478 void
479 target_load (char *arg, int from_tty)
480 {
481 target_dcache_invalidate ();
482 (*current_target.to_load) (&current_target, arg, from_tty);
483 }
484
485 void
486 target_create_inferior (char *exec_file, char *args,
487 char **env, int from_tty)
488 {
489 struct target_ops *t;
490
491 for (t = current_target.beneath; t != NULL; t = t->beneath)
492 {
493 if (t->to_create_inferior != NULL)
494 {
495 t->to_create_inferior (t, exec_file, args, env, from_tty);
496 if (targetdebug)
497 fprintf_unfiltered (gdb_stdlog,
498 "target_create_inferior (%s, %s, xxx, %d)\n",
499 exec_file, args, from_tty);
500 return;
501 }
502 }
503
504 internal_error (__FILE__, __LINE__,
505 _("could not find a target to create inferior"));
506 }
507
508 void
509 target_terminal_inferior (void)
510 {
511 /* A background resume (``run&'') should leave GDB in control of the
512 terminal. Use target_can_async_p, not target_is_async_p, since at
513 this point the target is not async yet. However, if sync_execution
514 is not set, we know it will become async prior to resume. */
515 if (target_can_async_p () && !sync_execution)
516 return;
517
518 /* If GDB is resuming the inferior in the foreground, install
519 inferior's terminal modes. */
520 (*current_target.to_terminal_inferior) (&current_target);
521 }
522
/* Default deprecated_xfer_memory callback: fail every transfer.
   Sets errno to EIO and reports zero bytes handled.  */
static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
530
/* Error out for an operation the current target does not support.
   Declared ATTRIBUTE_NORETURN above; error () does not return.  */
static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
537
/* Error out for an operation that requires a live process.  */
void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
543
/* Default to_terminal_info method: nothing was ever saved.  */
static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
549
550 /* A default implementation for the to_get_ada_task_ptid target method.
551
552 This function builds the PTID by using both LWP and TID as part of
553 the PTID lwp and tid elements. The pid used is the pid of the
554 inferior_ptid. */
555
static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  /* Combine the current inferior's pid with the given LWP and TID.  */
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
561
562 static enum exec_direction_kind
563 default_execution_direction (struct target_ops *self)
564 {
565 if (!target_can_execute_reverse)
566 return EXEC_FORWARD;
567 else if (!target_can_async_p ())
568 return EXEC_FORWARD;
569 else
570 gdb_assert_not_reached ("\
571 to_execution_direction must be implemented for reverse async");
572 }
573
574 /* Go through the target stack from top to bottom, copying over zero
575 entries in current_target, then filling in still empty entries. In
576 effect, we are doing class inheritance through the pushed target
577 vectors.
578
579 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
580 is currently implemented, is that it discards any knowledge of
581 which target an inherited method originally belonged to.
582 Consequently, new new target methods should instead explicitly and
583 locally search the target stack for the target that can handle the
584 request. */
585
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  current_target.to_stratum = target_stack->to_stratum;

  /* Copy FIELD from TARGET into current_target, but only if
     current_target does not already have a value for it -- i.e. the
     first (topmost) target providing FIELD wins.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  /* Do not add any new INHERITs here.  Instead, use the delegation
     mechanism provided by make-target-delegates.  */
  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      INHERIT (to_attach_no_wait, t);
      INHERIT (deprecated_xfer_memory, t);
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      INHERIT (to_has_thread_control, t);
      INHERIT (to_magic, t);
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Do not add any new de_faults here.  Instead, use the
     delegation mechanism provided by make-target-delegates.  */

#define de_fault(field, value) \
  if (!current_target.field) \
    current_target.field = value

  /* The casts here adapt the generic default callbacks to the exact
     member function-pointer types.  */
  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
648
649 /* Push a new target type into the stack of the existing target accessors,
650 possibly superseding some of the existing accessors.
651
652 Rather than allow an empty stack, we always have the dummy target at
653 the bottom stratum, so we can call the function vectors without
654 checking them. */
655
656 void
657 push_target (struct target_ops *t)
658 {
659 struct target_ops **cur;
660
661 /* Check magic number. If wrong, it probably means someone changed
662 the struct definition, but not all the places that initialize one. */
663 if (t->to_magic != OPS_MAGIC)
664 {
665 fprintf_unfiltered (gdb_stderr,
666 "Magic number of %s target struct wrong\n",
667 t->to_shortname);
668 internal_error (__FILE__, __LINE__,
669 _("failed internal consistency check"));
670 }
671
672 /* Find the proper stratum to install this target in. */
673 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
674 {
675 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
676 break;
677 }
678
679 /* If there's already targets at this stratum, remove them. */
680 /* FIXME: cagney/2003-10-15: I think this should be popping all
681 targets to CUR, and not just those at this stratum level. */
682 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
683 {
684 /* There's already something at this stratum level. Close it,
685 and un-hook it from the stack. */
686 struct target_ops *tmp = (*cur);
687
688 (*cur) = (*cur)->beneath;
689 tmp->beneath = NULL;
690 target_close (tmp);
691 }
692
693 /* We have removed all targets in our stratum, now add the new one. */
694 t->beneath = (*cur);
695 (*cur) = t;
696
697 update_current_target ();
698 }
699
700 /* Remove a target_ops vector from the stack, wherever it may be.
701 Return how many times it was removed (0 or 1). */
702
703 int
704 unpush_target (struct target_ops *t)
705 {
706 struct target_ops **cur;
707 struct target_ops *tmp;
708
709 if (t->to_stratum == dummy_stratum)
710 internal_error (__FILE__, __LINE__,
711 _("Attempt to unpush the dummy target"));
712
713 /* Look for the specified target. Note that we assume that a target
714 can only occur once in the target stack. */
715
716 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
717 {
718 if ((*cur) == t)
719 break;
720 }
721
722 /* If we don't find target_ops, quit. Only open targets should be
723 closed. */
724 if ((*cur) == NULL)
725 return 0;
726
727 /* Unchain the target. */
728 tmp = (*cur);
729 (*cur) = (*cur)->beneath;
730 tmp->beneath = NULL;
731
732 update_current_target ();
733
734 /* Finally close the target. Note we do this after unchaining, so
735 any target method calls from within the target_close
736 implementation don't end up in T anymore. */
737 target_close (t);
738
739 return 1;
740 }
741
742 void
743 pop_all_targets_above (enum strata above_stratum)
744 {
745 while ((int) (current_target.to_stratum) > (int) above_stratum)
746 {
747 if (!unpush_target (target_stack))
748 {
749 fprintf_unfiltered (gdb_stderr,
750 "pop_all_targets couldn't find target %s\n",
751 target_stack->to_shortname);
752 internal_error (__FILE__, __LINE__,
753 _("failed internal consistency check"));
754 break;
755 }
756 }
757 }
758
/* Unpush every target except the bottom dummy target.  */
void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
764
765 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
766
767 int
768 target_is_pushed (struct target_ops *t)
769 {
770 struct target_ops **cur;
771
772 /* Check magic number. If wrong, it probably means someone changed
773 the struct definition, but not all the places that initialize one. */
774 if (t->to_magic != OPS_MAGIC)
775 {
776 fprintf_unfiltered (gdb_stderr,
777 "Magic number of %s target struct wrong\n",
778 t->to_shortname);
779 internal_error (__FILE__, __LINE__,
780 _("failed internal consistency check"));
781 }
782
783 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
784 if (*cur == t)
785 return 1;
786
787 return 0;
788 }
789
790 /* Using the objfile specified in OBJFILE, find the address for the
791 current thread's thread-local storage with offset OFFSET. */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* volatile: assigned inside TRY_CATCH, which uses setjmp/longjmp.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target on the stack that knows how to resolve
     thread-local addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
	 throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  /* Each known TLS error is reworded with context (which file,
	     which thread); anything else is re-thrown unchanged.  */
	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
886
887 const char *
888 target_xfer_status_to_string (enum target_xfer_status err)
889 {
890 #define CASE(X) case X: return #X
891 switch (err)
892 {
893 CASE(TARGET_XFER_E_IO);
894 CASE(TARGET_XFER_E_UNAVAILABLE);
895 default:
896 return "<unknown>";
897 }
898 #undef CASE
899 };
900
901
902 #undef MIN
903 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
904
905 /* target_read_string -- read a null terminated string, up to LEN bytes,
906 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
907 Set *STRING to a pointer to malloc'd memory containing the data; the caller
908 is responsible for freeing it. Return the number of bytes successfully
909 read. */
910
int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;			/* Growable result buffer (malloc'd).  */
  int buffer_allocated;		/* Current capacity of BUFFER.  */
  char *bufptr;			/* Next write position in BUFFER.  */
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read at most up to the next 4-byte boundary, so the aligned
	 word read below covers the requested bytes.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Double the result buffer if this chunk would not fit.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy this chunk, stopping at (and including) a NUL byte.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
 done:
  /* NOTE(review): if LEN bytes are consumed without hitting a NUL,
     the returned buffer is not NUL-terminated; callers appear to rely
     on the returned byte count -- confirm before changing.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
977
978 struct target_section_table *
979 target_get_section_table (struct target_ops *target)
980 {
981 if (targetdebug)
982 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
983
984 return (*target->to_get_section_table) (target);
985 }
986
987 /* Find a section containing ADDR. */
988
989 struct target_section *
990 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
991 {
992 struct target_section_table *table = target_get_section_table (target);
993 struct target_section *secp;
994
995 if (table == NULL)
996 return NULL;
997
998 for (secp = table->sections; secp < table->sections_end; secp++)
999 {
1000 if (addr >= secp->addr && addr < secp->endaddr)
1001 return secp;
1002 }
1003 return NULL;
1004 }
1005
1006 /* Read memory from the live target, even if currently inspecting a
1007 traceframe. The return is the same as that of target_read. */
1008
1009 static enum target_xfer_status
1010 target_read_live_memory (enum target_object object,
1011 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1012 ULONGEST *xfered_len)
1013 {
1014 enum target_xfer_status ret;
1015 struct cleanup *cleanup;
1016
1017 /* Switch momentarily out of tfind mode so to access live memory.
1018 Note that this must not clear global state, such as the frame
1019 cache, which must still remain valid for the previous traceframe.
1020 We may be _building_ the frame cache at this point. */
1021 cleanup = make_cleanup_restore_traceframe_number ();
1022 set_traceframe_number (-1);
1023
1024 ret = target_xfer_partial (current_target.beneath, object, NULL,
1025 myaddr, NULL, memaddr, len, xfered_len);
1026
1027 do_cleanups (cleanup);
1028 return ret;
1029 }
1030
1031 /* Using the set of read-only target sections of OPS, read live
1032 read-only memory. Note that the actual reads start from the
1033 top-most target again.
1034
1035 For interface/parameters/return description see target.h,
1036 to_xfer_partial. */
1037
static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only act if MEMADDR falls inside a read-only section.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.
		     The caller is expected to retry for the rest.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  /* Not a read-only range we handle; report end-of-transfer.  */
  return TARGET_XFER_EOF;
}
1086
1087 /* Read memory from more than one valid target. A core file, for
1088 instance, could have some of memory but delegate other bits to
1089 the target below it. So, we must manually try all targets. */
1090
1091 static enum target_xfer_status
1092 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1093 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1094 ULONGEST *xfered_len)
1095 {
1096 enum target_xfer_status res;
1097
1098 do
1099 {
1100 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1101 readbuf, writebuf, memaddr, len,
1102 xfered_len);
1103 if (res == TARGET_XFER_OK)
1104 break;
1105
1106 /* Stop if the target reports that the memory is not available. */
1107 if (res == TARGET_XFER_E_UNAVAILABLE)
1108 break;
1109
1110 /* We want to continue past core files to executables, but not
1111 past a running target's memory. */
1112 if (ops->to_has_all_memory (ops))
1113 break;
1114
1115 ops = ops->beneath;
1116 }
1117 while (ops != NULL);
1118
1119 return res;
1120 }
1121
1122 /* Perform a partial memory transfer.
1123 For docs see target.h, to_xfer_partial. */
1124
static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  int reg_len;			/* LEN clipped to the memory region's upper bound.  */
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  /* Translate to the mapped address and satisfy the read from
	     the section tables instead of the live target.  */
	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  /* The address falls in a read-only section: the on-disk
	     file contents can stand in for target memory.  */
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  /* Clip the request so it ends where the first
		     available range begins.  */
		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the access mode of the memory region the address falls in.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  /* The dcache below is keyed per-inferior; without a current
     inferior there is nothing to cache against.  */
  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1332
1333 /* Perform a partial memory transfer. For docs see target.h,
1334 to_xfer_partial. */
1335
1336 static enum target_xfer_status
1337 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1338 gdb_byte *readbuf, const gdb_byte *writebuf,
1339 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1340 {
1341 enum target_xfer_status res;
1342
1343 /* Zero length requests are ok and require no work. */
1344 if (len == 0)
1345 return TARGET_XFER_EOF;
1346
1347 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1348 breakpoint insns, thus hiding out from higher layers whether
1349 there are software breakpoints inserted in the code stream. */
1350 if (readbuf != NULL)
1351 {
1352 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1353 xfered_len);
1354
1355 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1356 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1357 }
1358 else
1359 {
1360 void *buf;
1361 struct cleanup *old_chain;
1362
1363 /* A large write request is likely to be partially satisfied
1364 by memory_xfer_partial_1. We will continually malloc
1365 and free a copy of the entire write request for breakpoint
1366 shadow handling even though we only end up writing a small
1367 subset of it. Cap writes to 4KB to mitigate this. */
1368 len = min (4096, len);
1369
1370 buf = xmalloc (len);
1371 old_chain = make_cleanup (xfree, buf);
1372 memcpy (buf, writebuf, len);
1373
1374 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1375 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1376 xfered_len);
1377
1378 do_cleanups (old_chain);
1379 }
1380
1381 return res;
1382 }
1383
/* Cleanup callback for make_show_memory_breakpoints_cleanup: restore
   the global show_memory_breakpoints flag to the value that was
   smuggled through ARG as an integer.  */

static void
restore_show_memory_breakpoints (void *arg)
{
  show_memory_breakpoints = (uintptr_t) arg;
}
1389
1390 struct cleanup *
1391 make_show_memory_breakpoints_cleanup (int show)
1392 {
1393 int current = show_memory_breakpoints;
1394
1395 show_memory_breakpoints = show;
1396 return make_cleanup (restore_show_memory_breakpoints,
1397 (void *) (uintptr_t) current);
1398 }
1399
1400 /* For docs see target.h, to_xfer_partial. */
1401
enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honor the global "may-write-memory" setting.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  /* Dump the transferred bytes, starting a new line at every
	     16-byte-aligned host address; with "set debug target"
	     below 2, stop after the first line.  */
	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1492
1493 /* Read LEN bytes of target memory at address MEMADDR, placing the
1494 results in GDB's memory at MYADDR. Returns either 0 for success or
1495 TARGET_XFER_E_IO if any error occurs.
1496
1497 If an error occurs, no guarantee is made about the contents of the data at
1498 MYADDR. In particular, the caller should not depend upon partial reads
1499 filling the buffer with good data. There is no way for the caller to know
1500 how much good data might have been transfered anyway. Callers that can
1501 deal with partial reads should call target_read (which will retry until
1502 it makes no progress, and then return how much was transferred). */
1503
1504 int
1505 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1506 {
1507 /* Dispatch to the topmost target, not the flattened current_target.
1508 Memory accesses check target->to_has_(all_)memory, and the
1509 flattened target doesn't inherit those. */
1510 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1511 myaddr, memaddr, len) == len)
1512 return 0;
1513 else
1514 return TARGET_XFER_E_IO;
1515 }
1516
1517 /* Like target_read_memory, but specify explicitly that this is a read
1518 from the target's raw memory. That is, this read bypasses the
1519 dcache, breakpoint shadowing, etc. */
1520
1521 int
1522 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1523 {
1524 /* See comment in target_read_memory about why the request starts at
1525 current_target.beneath. */
1526 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1527 myaddr, memaddr, len) == len)
1528 return 0;
1529 else
1530 return TARGET_XFER_E_IO;
1531 }
1532
1533 /* Like target_read_memory, but specify explicitly that this is a read from
1534 the target's stack. This may trigger different cache behavior. */
1535
1536 int
1537 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1538 {
1539 /* See comment in target_read_memory about why the request starts at
1540 current_target.beneath. */
1541 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1542 myaddr, memaddr, len) == len)
1543 return 0;
1544 else
1545 return TARGET_XFER_E_IO;
1546 }
1547
1548 /* Like target_read_memory, but specify explicitly that this is a read from
1549 the target's code. This may trigger different cache behavior. */
1550
1551 int
1552 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1553 {
1554 /* See comment in target_read_memory about why the request starts at
1555 current_target.beneath. */
1556 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1557 myaddr, memaddr, len) == len)
1558 return 0;
1559 else
1560 return TARGET_XFER_E_IO;
1561 }
1562
1563 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1564 Returns either 0 for success or TARGET_XFER_E_IO if any
1565 error occurs. If an error occurs, no guarantee is made about how
1566 much data got written. Callers that can deal with partial writes
1567 should call target_write. */
1568
1569 int
1570 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1571 {
1572 /* See comment in target_read_memory about why the request starts at
1573 current_target.beneath. */
1574 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1575 myaddr, memaddr, len) == len)
1576 return 0;
1577 else
1578 return TARGET_XFER_E_IO;
1579 }
1580
1581 /* Write LEN bytes from MYADDR to target raw memory at address
1582 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1583 if any error occurs. If an error occurs, no guarantee is made
1584 about how much data got written. Callers that can deal with
1585 partial writes should call target_write. */
1586
1587 int
1588 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1589 {
1590 /* See comment in target_read_memory about why the request starts at
1591 current_target.beneath. */
1592 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1593 myaddr, memaddr, len) == len)
1594 return 0;
1595 else
1596 return TARGET_XFER_E_IO;
1597 }
1598
1599 /* Fetch the target's memory map. */
1600
1601 VEC(mem_region_s) *
1602 target_memory_map (void)
1603 {
1604 VEC(mem_region_s) *result;
1605 struct mem_region *last_one, *this_one;
1606 int ix;
1607 struct target_ops *t;
1608
1609 if (targetdebug)
1610 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1611
1612 result = current_target.to_memory_map (&current_target);
1613 if (result == NULL)
1614 return NULL;
1615
1616 qsort (VEC_address (mem_region_s, result),
1617 VEC_length (mem_region_s, result),
1618 sizeof (struct mem_region), mem_region_cmp);
1619
1620 /* Check that regions do not overlap. Simultaneously assign
1621 a numbering for the "mem" commands to use to refer to
1622 each region. */
1623 last_one = NULL;
1624 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1625 {
1626 this_one->number = ix;
1627
1628 if (last_one && last_one->hi > this_one->lo)
1629 {
1630 warning (_("Overlapping regions in memory map: ignoring"));
1631 VEC_free (mem_region_s, result);
1632 return NULL;
1633 }
1634 last_one = this_one;
1635 }
1636
1637 return result;
1638 }
1639
1640 void
1641 target_flash_erase (ULONGEST address, LONGEST length)
1642 {
1643 if (targetdebug)
1644 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1645 hex_string (address), phex (length, 0));
1646 current_target.to_flash_erase (&current_target, address, length);
1647 }
1648
1649 void
1650 target_flash_done (void)
1651 {
1652 if (targetdebug)
1653 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1654 current_target.to_flash_done (&current_target);
1655 }
1656
/* "show trust-readonly-sections" command callback: report the current
   setting, already formatted in VALUE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1665
1666 /* More generic transfers. */
1667
1668 static enum target_xfer_status
1669 default_xfer_partial (struct target_ops *ops, enum target_object object,
1670 const char *annex, gdb_byte *readbuf,
1671 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1672 ULONGEST *xfered_len)
1673 {
1674 if (object == TARGET_OBJECT_MEMORY
1675 && ops->deprecated_xfer_memory != NULL)
1676 /* If available, fall back to the target's
1677 "deprecated_xfer_memory" method. */
1678 {
1679 int xfered = -1;
1680
1681 errno = 0;
1682 if (writebuf != NULL)
1683 {
1684 void *buffer = xmalloc (len);
1685 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1686
1687 memcpy (buffer, writebuf, len);
1688 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1689 1/*write*/, NULL, ops);
1690 do_cleanups (cleanup);
1691 }
1692 if (readbuf != NULL)
1693 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1694 0/*read*/, NULL, ops);
1695 if (xfered > 0)
1696 {
1697 *xfered_len = (ULONGEST) xfered;
1698 return TARGET_XFER_E_IO;
1699 }
1700 else if (xfered == 0 && errno == 0)
1701 /* "deprecated_xfer_memory" uses 0, cross checked against
1702 ERRNO as one indication of an error. */
1703 return TARGET_XFER_EOF;
1704 else
1705 return TARGET_XFER_E_IO;
1706 }
1707 else
1708 {
1709 gdb_assert (ops->beneath != NULL);
1710 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1711 readbuf, writebuf, offset, len,
1712 xfered_len);
1713 }
1714 }
1715
1716 /* Target vector read/write partial wrapper functions. */
1717
/* Read-only convenience wrapper: a target_xfer_partial with BUF as
   the read buffer and no write buffer.  */

static enum target_xfer_status
target_read_partial (struct target_ops *ops,
		     enum target_object object,
		     const char *annex, gdb_byte *buf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
			      xfered_len);
}
1728
/* Write-only convenience wrapper: a target_xfer_partial with BUF as
   the write buffer and no read buffer.  */

static enum target_xfer_status
target_write_partial (struct target_ops *ops,
		      enum target_object object,
		      const char *annex, const gdb_byte *buf,
		      ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
			      xfered_len);
}
1738
1739 /* Wrappers to perform the full transfer. */
1740
1741 /* For docs on target_read see target.h. */
1742
1743 LONGEST
1744 target_read (struct target_ops *ops,
1745 enum target_object object,
1746 const char *annex, gdb_byte *buf,
1747 ULONGEST offset, LONGEST len)
1748 {
1749 LONGEST xfered = 0;
1750
1751 while (xfered < len)
1752 {
1753 ULONGEST xfered_len;
1754 enum target_xfer_status status;
1755
1756 status = target_read_partial (ops, object, annex,
1757 (gdb_byte *) buf + xfered,
1758 offset + xfered, len - xfered,
1759 &xfered_len);
1760
1761 /* Call an observer, notifying them of the xfer progress? */
1762 if (status == TARGET_XFER_EOF)
1763 return xfered;
1764 else if (status == TARGET_XFER_OK)
1765 {
1766 xfered += xfered_len;
1767 QUIT;
1768 }
1769 else
1770 return -1;
1771
1772 }
1773 return len;
1774 }
1775
1776 /* Assuming that the entire [begin, end) range of memory cannot be
1777 read, try to read whatever subrange is possible to read.
1778
1779 The function returns, in RESULT, either zero or one memory block.
1780 If there's a readable subrange at the beginning, it is completely
1781 read and returned. Any further readable subrange will not be read.
1782 Otherwise, if there's a readable subrange at the end, it will be
1783 completely read and returned. Any readable subranges before it
1784 (obviously, not starting at the beginning), will be ignored. In
1785 other cases -- either no readable subrange, or readable subrange(s)
1786 that is neither at the beginning, or end, nothing is returned.
1787
1788 The purpose of this function is to handle a read across a boundary
1789 of accessible memory in a case when memory map is not available.
1790 The above restrictions are fine for this case, but will give
1791 incorrect results if the memory is 'patchy'. However, supporting
1792 'patchy' memory would require trying to read every single byte,
1793 and it seems unacceptable solution. Explicit memory map is
1794 recommended for this case -- and target_read_memory_robust will
1795 take care of reading multiple ranges then. */
1796
static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;			/* Non-zero: readable part starts at BEGIN.  */
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      /* Neither endpoint is readable; no subrange to return.  */
      xfree (buf);
      return;
    }

  /* Binary search for the boundary of the readable region.

     Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* The "first half" is always the half adjacent to the known
	 readable endpoint.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the next
	     iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  BUF itself
	 becomes the result block's storage; ownership transfers to
	 *RESULT.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy it into a
	 right-sized buffer and release the scratch one.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
1910
1911 void
1912 free_memory_read_result_vector (void *x)
1913 {
1914 VEC(memory_read_result_s) *v = x;
1915 memory_read_result_s *current;
1916 int ix;
1917
1918 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
1919 {
1920 xfree (current->data);
1921 }
1922 VEC_free (memory_read_result_s, v);
1923 }
1924
/* Read as much of [OFFSET, OFFSET+LEN) as is readable, region by
   region, returning a vector of memory_read_result_s blocks whose
   data buffers are owned by the caller.  */

VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      /* region->hi == 0 means the region has no upper bound.  */
      if (region->hi == 0)
	rlen = len - xfered;
      else
	rlen = region->hi - offset;	/* NOTE(review): presumably this
					   should be relative to
					   OFFSET + XFERED, the current
					   read position -- confirm.  */

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
	{
	  /* Cannot read this region.  Note that we can end up here only
	     if the region is explicitly marked inaccessible, or
	     'inaccessible-by-default' is in effect.  */
	  xfered += rlen;
	}
      else
	{
	  LONGEST to_read = min (len - xfered, rlen);
	  gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

	  LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
				      (gdb_byte *) buffer,
				      offset + xfered, to_read);
	  /* Call an observer, notifying them of the xfer progress?  */
	  if (xfer <= 0)
	    {
	      /* Got an error reading full chunk.  See if maybe we can read
		 some subrange.  */
	      xfree (buffer);
	      read_whatever_is_readable (ops, offset + xfered,
					 offset + xfered + to_read, &result);
	      xfered += to_read;
	    }
	  else
	    {
	      struct memory_read_result r;
	      /* BUFFER's ownership moves into RESULT.  */
	      r.data = buffer;
	      r.begin = offset + xfered;
	      r.end = r.begin + xfer;
	      VEC_safe_push (memory_read_result_s, result, &r);
	      xfered += xfer;
	    }
	  QUIT;
	}
    }
  return result;
}
1983
1984
1985 /* An alternative to target_write with progress callbacks. */
1986
1987 LONGEST
1988 target_write_with_progress (struct target_ops *ops,
1989 enum target_object object,
1990 const char *annex, const gdb_byte *buf,
1991 ULONGEST offset, LONGEST len,
1992 void (*progress) (ULONGEST, void *), void *baton)
1993 {
1994 LONGEST xfered = 0;
1995
1996 /* Give the progress callback a chance to set up. */
1997 if (progress)
1998 (*progress) (0, baton);
1999
2000 while (xfered < len)
2001 {
2002 ULONGEST xfered_len;
2003 enum target_xfer_status status;
2004
2005 status = target_write_partial (ops, object, annex,
2006 (gdb_byte *) buf + xfered,
2007 offset + xfered, len - xfered,
2008 &xfered_len);
2009
2010 if (status == TARGET_XFER_EOF)
2011 return xfered;
2012 if (TARGET_XFER_STATUS_ERROR_P (status))
2013 return -1;
2014
2015 gdb_assert (status == TARGET_XFER_OK);
2016 if (progress)
2017 (*progress) (xfered_len, baton);
2018
2019 xfered += xfered_len;
2020 QUIT;
2021 }
2022 return len;
2023 }
2024
2025 /* For docs on target_write see target.h. */
2026
LONGEST
target_write (struct target_ops *ops,
	      enum target_object object,
	      const char *annex, const gdb_byte *buf,
	      ULONGEST offset, LONGEST len)
{
  /* A plain write is a progress-callback write with no callback.  */
  return target_write_with_progress (ops, object, annex, buf, offset, len,
				     NULL, NULL);
}
2036
2037 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2038 the size of the transferred data. PADDING additional bytes are
2039 available in *BUF_P. This is a helper function for
2040 target_read_alloc; see the declaration of that function for more
2041 information. */
2042
static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;

  /* This function does not have a length parameter; it reads the
     entire OBJECT).  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      /* Read the next chunk, always leaving PADDING bytes of headroom
	 at the end of the buffer (target_read_stralloc uses it for a
	 terminating NUL).  */
      status = target_read_partial (ops, object, annex, &buf[buf_pos],
				    buf_pos, buf_alloc - buf_pos - padding,
				    &xfered_len);

      if (status == TARGET_XFER_EOF)
	{
	  /* Read all there was.  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;	/* Ownership of BUF passes to the caller.  */
	  return buf_pos;
	}
      else if (status != TARGET_XFER_OK)
	{
	  /* An error occurred.  */
	  xfree (buf);
	  return TARGET_XFER_E_IO;
	}

      buf_pos += xfered_len;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
2099
2100 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2101 the size of the transferred data. See the declaration in "target.h"
2102 function for more information about the return value. */
2103
LONGEST
target_read_alloc (struct target_ops *ops, enum target_object object,
		   const char *annex, gdb_byte **buf_p)
{
  /* No padding needed for a plain binary read.  */
  return target_read_alloc_1 (ops, object, annex, buf_p, 0);
}
2110
2111 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2112 returned as a string, allocated using xmalloc. If an error occurs
2113 or the transfer is unsupported, NULL is returned. Empty objects
2114 are returned as allocated but empty strings. A warning is issued
2115 if the result contains any embedded NUL bytes. */
2116
char *
target_read_stralloc (struct target_ops *ops, enum target_object object,
		      const char *annex)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* PADDING of 1 reserves room for the terminating NUL stored below.  */
  transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
  bufstr = (char *) buffer;

  /* Error or unsupported transfer; BUFFER was already freed.  */
  if (transferred < 0)
    return NULL;

  /* Empty object; BUFFER was not handed to us in this case.  */
  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target object %d, annex %s, "
		   "contained unexpected null characters"),
		 (int) object, annex ? annex : "(none)");
	break;
      }

  return bufstr;
}
2148
2149 /* Memory transfer methods. */
2150
2151 void
2152 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2153 LONGEST len)
2154 {
2155 /* This method is used to read from an alternate, non-current
2156 target. This read must bypass the overlay support (as symbols
2157 don't match this target), and GDB's internal cache (wrong cache
2158 for this target). */
2159 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2160 != len)
2161 memory_error (TARGET_XFER_E_IO, addr);
2162 }
2163
2164 ULONGEST
2165 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2166 int len, enum bfd_endian byte_order)
2167 {
2168 gdb_byte buf[sizeof (ULONGEST)];
2169
2170 gdb_assert (len <= sizeof (buf));
2171 get_target_memory (ops, addr, buf, len);
2172 return extract_unsigned_integer (buf, len, byte_order);
2173 }
2174
2175 /* See target.h. */
2176
2177 int
2178 target_insert_breakpoint (struct gdbarch *gdbarch,
2179 struct bp_target_info *bp_tgt)
2180 {
2181 if (!may_insert_breakpoints)
2182 {
2183 warning (_("May not insert breakpoints"));
2184 return 1;
2185 }
2186
2187 return current_target.to_insert_breakpoint (&current_target,
2188 gdbarch, bp_tgt);
2189 }
2190
2191 /* See target.h. */
2192
2193 int
2194 target_remove_breakpoint (struct gdbarch *gdbarch,
2195 struct bp_target_info *bp_tgt)
2196 {
2197 /* This is kind of a weird case to handle, but the permission might
2198 have been changed after breakpoints were inserted - in which case
2199 we should just take the user literally and assume that any
2200 breakpoints should be left in place. */
2201 if (!may_insert_breakpoints)
2202 {
2203 warning (_("May not remove breakpoints"));
2204 return 1;
2205 }
2206
2207 return current_target.to_remove_breakpoint (&current_target,
2208 gdbarch, bp_tgt);
2209 }
2210
/* Implement the "info target"/"info files" style listing: for each
   pushed target that supplies memory, print its long name and file
   information, walking the stack from top to bottom.  */

static void
target_info (char *args, int from_tty)
{
  struct target_ops *t;
  int has_all_mem = 0;

  if (symfile_objfile != NULL)
    printf_unfiltered (_("Symbols from \"%s\".\n"),
		       objfile_name (symfile_objfile));

  for (t = target_stack; t != NULL; t = t->beneath)
    {
      /* Only list targets that actually provide memory.  */
      if (!(*t->to_has_memory) (t))
	continue;

      /* Skip the dummy target at the bottom of the stack.  */
      if ((int) (t->to_stratum) <= (int) dummy_stratum)
	continue;
      /* If a target above provided all memory, entries below it are
	 only reachable when that target is not running.  */
      if (has_all_mem)
	printf_unfiltered (_("\tWhile running this, "
			     "GDB does not access memory from...\n"));
      printf_unfiltered ("%s:\n", t->to_longname);
      (t->to_files_info) (t);
      has_all_mem = (*t->to_has_all_memory) (t);
    }
}
2236
/* This function is called before any new inferior is created, e.g.
   by running a program, attaching, or connecting to a target.
   It cleans up any state from previous invocations which might
   change between runs.  This is a subset of what target_preopen
   resets (things which might change between targets).  */

void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  */
  if (!gdbarch_has_global_solist (target_gdbarch ()))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* Tracepoint agent capabilities must be re-probed for the new
     inferior regardless of solist globality.  */
  agent_capability_invalidate ();
}
2277
2278 /* Callback for iterate_over_inferiors. Gets rid of the given
2279 inferior. */
2280
2281 static int
2282 dispose_inferior (struct inferior *inf, void *args)
2283 {
2284 struct thread_info *thread;
2285
2286 thread = any_thread_of_process (inf->pid);
2287 if (thread)
2288 {
2289 switch_to_thread (thread->ptid);
2290
2291 /* Core inferiors actually should be detached, not killed. */
2292 if (target_has_execution)
2293 target_kill ();
2294 else
2295 target_detach (NULL, 0);
2296 }
2297
2298 return 0;
2299 }
2300
/* This is to be called by the open routine before it does
   anything.  */

void
target_preopen (int from_tty)
{
  dont_repeat ();

  /* Get rid of any existing inferiors before connecting elsewhere;
     ask the user first when there is a live process and we are
     interactive.  */
  if (have_inferiors ())
    {
      if (!from_tty
	  || !have_live_inferiors ()
	  || query (_("A program is being debugged already.  Kill it? ")))
	iterate_over_inferiors (dispose_inferior, NULL);
      else
	error (_("Program not killed."));
    }

  /* Calling target_kill may remove the target from the stack.  But if
     it doesn't (which seems like a win for UDI), remove it now.  */
  /* Leave the exec target, though.  The user may be switching from a
     live process to a core of the same program.  */
  pop_all_targets_above (file_stratum);

  /* Reset per-inferior state last, after the old targets are gone.  */
  target_pre_inferior (from_tty);
}
2327
2328 /* Detach a target after doing deferred register stores. */
2329
2330 void
2331 target_detach (const char *args, int from_tty)
2332 {
2333 struct target_ops* t;
2334
2335 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2336 /* Don't remove global breakpoints here. They're removed on
2337 disconnection from the target. */
2338 ;
2339 else
2340 /* If we're in breakpoints-always-inserted mode, have to remove
2341 them before detaching. */
2342 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2343
2344 prepare_for_detach ();
2345
2346 current_target.to_detach (&current_target, args, from_tty);
2347 if (targetdebug)
2348 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2349 args, from_tty);
2350 }
2351
2352 void
2353 target_disconnect (char *args, int from_tty)
2354 {
2355 /* If we're in breakpoints-always-inserted mode or if breakpoints
2356 are global across processes, we have to remove them before
2357 disconnecting. */
2358 remove_breakpoints ();
2359
2360 if (targetdebug)
2361 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2362 args, from_tty);
2363 current_target.to_disconnect (&current_target, args, from_tty);
2364 }
2365
/* Wait for an event from the inferior matching PTID, subject to
   OPTIONS; fill in *STATUS and return the ptid of the thread the
   event applies to.  */

ptid_t
target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
{
  struct target_ops *t;
  ptid_t retval = (current_target.to_wait) (&current_target, ptid,
					    status, options);

  if (targetdebug)
    {
      char *status_string;
      char *options_string;

      /* Both strings are xmalloc'd and must be freed below.  */
      status_string = target_waitstatus_to_string (status);
      options_string = target_options_to_string (options);
      fprintf_unfiltered (gdb_stdlog,
			  "target_wait (%d, status, options={%s})"
			  " = %d,   %s\n",
			  ptid_get_pid (ptid), options_string,
			  ptid_get_pid (retval), status_string);
      xfree (status_string);
      xfree (options_string);
    }

  return retval;
}
2391
2392 char *
2393 target_pid_to_str (ptid_t ptid)
2394 {
2395 return (*current_target.to_pid_to_str) (&current_target, ptid);
2396 }
2397
2398 char *
2399 target_thread_name (struct thread_info *info)
2400 {
2401 return current_target.to_thread_name (&current_target, info);
2402 }
2403
/* Resume execution of PTID, single-stepping when STEP is non-zero,
   delivering SIGNAL to the inferior.  Also updates GDB's run-state
   bookkeeping for the resumed threads.  */

void
target_resume (ptid_t ptid, int step, enum gdb_signal signal)
{
  struct target_ops *t;

  /* Memory the inferior may modify is about to become stale.  */
  target_dcache_invalidate ();

  current_target.to_resume (&current_target, ptid, step, signal);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
			ptid_get_pid (ptid),
			step ? "step" : "continue",
			gdb_signal_to_name (signal));

  /* Only after the target has actually resumed: mark registers stale
     and flag the threads as executing/running.  */
  registers_changed_ptid (ptid);
  set_executing (ptid, 1);
  set_running (ptid, 1);
  clear_inline_frame_state (ptid);
}
2423
2424 void
2425 target_pass_signals (int numsigs, unsigned char *pass_signals)
2426 {
2427 if (targetdebug)
2428 {
2429 int i;
2430
2431 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2432 numsigs);
2433
2434 for (i = 0; i < numsigs; i++)
2435 if (pass_signals[i])
2436 fprintf_unfiltered (gdb_stdlog, " %s",
2437 gdb_signal_to_name (i));
2438
2439 fprintf_unfiltered (gdb_stdlog, " })\n");
2440 }
2441
2442 (*current_target.to_pass_signals) (&current_target, numsigs, pass_signals);
2443 }
2444
2445 void
2446 target_program_signals (int numsigs, unsigned char *program_signals)
2447 {
2448 if (targetdebug)
2449 {
2450 int i;
2451
2452 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2453 numsigs);
2454
2455 for (i = 0; i < numsigs; i++)
2456 if (program_signals[i])
2457 fprintf_unfiltered (gdb_stdlog, " %s",
2458 gdb_signal_to_name (i));
2459
2460 fprintf_unfiltered (gdb_stdlog, " })\n");
2461 }
2462
2463 (*current_target.to_program_signals) (&current_target,
2464 numsigs, program_signals);
2465 }
2466
/* Fallback to_follow_fork method.  Reaching here means some target
   reported a fork event without knowing how to follow it; that is an
   internal inconsistency, so abort.  */

static int
default_follow_fork (struct target_ops *self, int follow_child,
		     int detach_fork)
{
  /* Some target returned a fork event, but did not know how to follow it.  */
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to follow fork"));
}
2475
2476 /* Look through the list of possible targets for a target that can
2477 follow forks. */
2478
2479 int
2480 target_follow_fork (int follow_child, int detach_fork)
2481 {
2482 int retval = current_target.to_follow_fork (&current_target,
2483 follow_child, detach_fork);
2484
2485 if (targetdebug)
2486 fprintf_unfiltered (gdb_stdlog,
2487 "target_follow_fork (%d, %d) = %d\n",
2488 follow_child, detach_fork, retval);
2489 return retval;
2490 }
2491
/* Fallback to_mourn_inferior method.  Ending up here means no pushed
   target handled mourning, which is an internal inconsistency.  */

static void
default_mourn_inferior (struct target_ops *self)
{
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to follow mourn inferior"));
}
2498
/* The inferior is gone (exited, killed, or detached); let the target
   clean up its per-inferior state.  */

void
target_mourn_inferior (void)
{
  current_target.to_mourn_inferior (&current_target);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");

  /* We no longer need to keep handles on any of the object files.
     Make sure to release them to avoid unnecessarily locking any
     of them while we're not actually debugging.  */
  bfd_cache_close_all ();
}
2511
2512 /* Look for a target which can describe architectural features, starting
2513 from TARGET. If we find one, return its description. */
2514
2515 const struct target_desc *
2516 target_read_description (struct target_ops *target)
2517 {
2518 return target->to_read_description (target);
2519 }
2520
/* This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search in on the
   target side with, for example, gdbserver).
   Returns 1 if PATTERN was found (address in *FOUND_ADDRP), 0 if not
   found, -1 on a read error.  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* Overlap chunks by PATTERN_LEN - 1 bytes so matches straddling a
     chunk boundary are still found.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  /* Plain malloc (not xmalloc): a failed huge allocation degrades to
     a user-visible error instead of an internal abort.  */
  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2627
/* Default implementation of memory-searching.  */

static int
default_search_memory (struct target_ops *self,
		       CORE_ADDR start_addr, ULONGEST search_space_len,
		       const gdb_byte *pattern, ULONGEST pattern_len,
		       CORE_ADDR *found_addrp)
{
  /* Start over from the top of the target stack.  */
  /* NOTE(review): SELF is intentionally ignored here; reads go
     through the stratum beneath the top of the global stack.  */
  return simple_search_memory (current_target.beneath,
			       start_addr, search_space_len,
			       pattern, pattern_len, found_addrp);
}
2641
2642 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2643 sequence of bytes in PATTERN with length PATTERN_LEN.
2644
2645 The result is 1 if found, 0 if not found, and -1 if there was an error
2646 requiring halting of the search (e.g. memory read error).
2647 If the pattern is found the address is recorded in FOUND_ADDRP. */
2648
2649 int
2650 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2651 const gdb_byte *pattern, ULONGEST pattern_len,
2652 CORE_ADDR *found_addrp)
2653 {
2654 int found;
2655
2656 if (targetdebug)
2657 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2658 hex_string (start_addr));
2659
2660 found = current_target.to_search_memory (&current_target, start_addr,
2661 search_space_len,
2662 pattern, pattern_len, found_addrp);
2663
2664 if (targetdebug)
2665 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2666
2667 return found;
2668 }
2669
/* Look through the currently pushed targets.  If none of them will
   be able to restart the currently running process, issue an error
   message.  */

void
target_require_runnable (void)
{
  struct target_ops *t;

  for (t = target_stack; t != NULL; t = t->beneath)
    {
      /* If this target knows how to create a new program, then
	 assume we will still be able to after killing the current
	 one.  Either killing and mourning will not pop T, or else
	 find_default_run_target will find it again.  */
      if (t->to_create_inferior != NULL)
	return;

      /* Do not worry about thread_stratum targets that can not
	 create inferiors.  Assume they will be pushed again if
	 necessary, and continue to the process_stratum.  */
      if (t->to_stratum == thread_stratum
	  || t->to_stratum == arch_stratum)
	continue;

      /* Anything else that cannot "run" blocks restarting.  */
      error (_("The \"%s\" target does not support \"run\".  "
	       "Try \"help target\" or \"continue\"."),
	     t->to_shortname);
    }

  /* This function is only called if the target is running.  In that
     case there should have been a process_stratum target and it
     should either know how to create inferiors, or not...  */
  internal_error (__FILE__, __LINE__, _("No targets found"));
}
2705
2706 /* Look through the list of possible targets for a target that can
2707 execute a run or attach command without any other data. This is
2708 used to locate the default process stratum.
2709
2710 If DO_MESG is not NULL, the result is always valid (error() is
2711 called for errors); else, return NULL on error. */
2712
2713 static struct target_ops *
2714 find_default_run_target (char *do_mesg)
2715 {
2716 struct target_ops **t;
2717 struct target_ops *runable = NULL;
2718 int count;
2719
2720 count = 0;
2721
2722 for (t = target_structs; t < target_structs + target_struct_size;
2723 ++t)
2724 {
2725 if ((*t)->to_can_run != delegate_can_run && target_can_run (*t))
2726 {
2727 runable = *t;
2728 ++count;
2729 }
2730 }
2731
2732 if (count != 1)
2733 {
2734 if (do_mesg)
2735 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2736 else
2737 return NULL;
2738 }
2739
2740 return runable;
2741 }
2742
2743 void
2744 find_default_attach (struct target_ops *ops, char *args, int from_tty)
2745 {
2746 struct target_ops *t;
2747
2748 t = find_default_run_target ("attach");
2749 (t->to_attach) (t, args, from_tty);
2750 return;
2751 }
2752
2753 void
2754 find_default_create_inferior (struct target_ops *ops,
2755 char *exec_file, char *allargs, char **env,
2756 int from_tty)
2757 {
2758 struct target_ops *t;
2759
2760 t = find_default_run_target ("run");
2761 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
2762 return;
2763 }
2764
2765 static int
2766 find_default_can_async_p (struct target_ops *ignore)
2767 {
2768 struct target_ops *t;
2769
2770 /* This may be called before the target is pushed on the stack;
2771 look for the default process stratum. If there's none, gdb isn't
2772 configured with a native debugger, and target remote isn't
2773 connected yet. */
2774 t = find_default_run_target (NULL);
2775 if (t && t->to_can_async_p != delegate_can_async_p)
2776 return (t->to_can_async_p) (t);
2777 return 0;
2778 }
2779
2780 static int
2781 find_default_is_async_p (struct target_ops *ignore)
2782 {
2783 struct target_ops *t;
2784
2785 /* This may be called before the target is pushed on the stack;
2786 look for the default process stratum. If there's none, gdb isn't
2787 configured with a native debugger, and target remote isn't
2788 connected yet. */
2789 t = find_default_run_target (NULL);
2790 if (t && t->to_is_async_p != delegate_is_async_p)
2791 return (t->to_is_async_p) (t);
2792 return 0;
2793 }
2794
2795 static int
2796 find_default_supports_non_stop (struct target_ops *self)
2797 {
2798 struct target_ops *t;
2799
2800 t = find_default_run_target (NULL);
2801 if (t && t->to_supports_non_stop)
2802 return (t->to_supports_non_stop) (t);
2803 return 0;
2804 }
2805
2806 int
2807 target_supports_non_stop (void)
2808 {
2809 struct target_ops *t;
2810
2811 for (t = &current_target; t != NULL; t = t->beneath)
2812 if (t->to_supports_non_stop)
2813 return t->to_supports_non_stop (t);
2814
2815 return 0;
2816 }
2817
/* Implement the "info proc" command.  Returns 1 if some target
   handled the request, 0 if none did.  */

int
target_info_proc (char *args, enum info_proc_what what)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  if (current_target.to_stratum >= process_stratum)
    t = current_target.beneath;
  else
    t = find_default_run_target (NULL);

  /* Delegate to the first stratum that implements to_info_proc.  */
  for (; t != NULL; t = t->beneath)
    {
      if (t->to_info_proc != NULL)
	{
	  t->to_info_proc (t, args, what);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_info_proc (\"%s\", %d)\n", args, what);

	  return 1;
	}
    }

  return 0;
}
2849
2850 static int
2851 find_default_supports_disable_randomization (struct target_ops *self)
2852 {
2853 struct target_ops *t;
2854
2855 t = find_default_run_target (NULL);
2856 if (t && t->to_supports_disable_randomization)
2857 return (t->to_supports_disable_randomization) (t);
2858 return 0;
2859 }
2860
2861 int
2862 target_supports_disable_randomization (void)
2863 {
2864 struct target_ops *t;
2865
2866 for (t = &current_target; t != NULL; t = t->beneath)
2867 if (t->to_supports_disable_randomization)
2868 return t->to_supports_disable_randomization (t);
2869
2870 return 0;
2871 }
2872
2873 char *
2874 target_get_osdata (const char *type)
2875 {
2876 struct target_ops *t;
2877
2878 /* If we're already connected to something that can get us OS
2879 related data, use it. Otherwise, try using the native
2880 target. */
2881 if (current_target.to_stratum >= process_stratum)
2882 t = current_target.beneath;
2883 else
2884 t = find_default_run_target ("get OS data");
2885
2886 if (!t)
2887 return NULL;
2888
2889 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
2890 }
2891
/* Determine the current address space of thread PTID.  */

struct address_space *
target_thread_address_space (ptid_t ptid)
{
  struct address_space *aspace;
  struct inferior *inf;
  struct target_ops *t;

  /* First let any stratum that knows per-thread address spaces
     answer.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_address_space != NULL)
	{
	  aspace = t->to_thread_address_space (t, ptid);
	  gdb_assert (aspace);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_thread_address_space (%s) = %d\n",
				target_pid_to_str (ptid),
				address_space_num (aspace));
	  return aspace;
	}
    }

  /* Fall-back to the "main" address space of the inferior.  */
  inf = find_inferior_pid (ptid_get_pid (ptid));

  if (inf == NULL || inf->aspace == NULL)
    internal_error (__FILE__, __LINE__,
		    _("Can't determine the current "
		      "address space of thread %s\n"),
		    target_pid_to_str (ptid));

  return inf->aspace;
}
2928
2929
2930 /* Target file operations. */
2931
2932 static struct target_ops *
2933 default_fileio_target (void)
2934 {
2935 /* If we're already connected to something that can perform
2936 file I/O, use it. Otherwise, try using the native target. */
2937 if (current_target.to_stratum >= process_stratum)
2938 return current_target.beneath;
2939 else
2940 return find_default_run_target ("file I/O");
2941 }
2942
2943 /* Open FILENAME on the target, using FLAGS and MODE. Return a
2944 target file descriptor, or -1 if an error occurs (and set
2945 *TARGET_ERRNO). */
2946 int
2947 target_fileio_open (const char *filename, int flags, int mode,
2948 int *target_errno)
2949 {
2950 struct target_ops *t;
2951
2952 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2953 {
2954 if (t->to_fileio_open != NULL)
2955 {
2956 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
2957
2958 if (targetdebug)
2959 fprintf_unfiltered (gdb_stdlog,
2960 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
2961 filename, flags, mode,
2962 fd, fd != -1 ? 0 : *target_errno);
2963 return fd;
2964 }
2965 }
2966
2967 *target_errno = FILEIO_ENOSYS;
2968 return -1;
2969 }
2970
/* Write up to LEN bytes from WRITE_BUF to FD on the target.
   Return the number of bytes written, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
int
target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
		      ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first stratum that implements to_fileio_pwrite.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pwrite != NULL)
	{
	  int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
					 target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pwrite (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No stratum provides file I/O at all.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3000
/* Read up to LEN bytes FD on the target into READ_BUF.
   Return the number of bytes read, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
int
target_fileio_pread (int fd, gdb_byte *read_buf, int len,
		     ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first stratum that implements to_fileio_pread.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pread != NULL)
	{
	  int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
					target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pread (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No stratum provides file I/O at all.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3030
3031 /* Close FD on the target. Return 0, or -1 if an error occurs
3032 (and set *TARGET_ERRNO). */
3033 int
3034 target_fileio_close (int fd, int *target_errno)
3035 {
3036 struct target_ops *t;
3037
3038 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3039 {
3040 if (t->to_fileio_close != NULL)
3041 {
3042 int ret = t->to_fileio_close (t, fd, target_errno);
3043
3044 if (targetdebug)
3045 fprintf_unfiltered (gdb_stdlog,
3046 "target_fileio_close (%d) = %d (%d)\n",
3047 fd, ret, ret != -1 ? 0 : *target_errno);
3048 return ret;
3049 }
3050 }
3051
3052 *target_errno = FILEIO_ENOSYS;
3053 return -1;
3054 }
3055
3056 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3057 occurs (and set *TARGET_ERRNO). */
3058 int
3059 target_fileio_unlink (const char *filename, int *target_errno)
3060 {
3061 struct target_ops *t;
3062
3063 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3064 {
3065 if (t->to_fileio_unlink != NULL)
3066 {
3067 int ret = t->to_fileio_unlink (t, filename, target_errno);
3068
3069 if (targetdebug)
3070 fprintf_unfiltered (gdb_stdlog,
3071 "target_fileio_unlink (%s) = %d (%d)\n",
3072 filename, ret, ret != -1 ? 0 : *target_errno);
3073 return ret;
3074 }
3075 }
3076
3077 *target_errno = FILEIO_ENOSYS;
3078 return -1;
3079 }
3080
/* Read value of symbolic link FILENAME on the target.  Return a
   null-terminated string allocated via xmalloc, or NULL if an error
   occurs (and set *TARGET_ERRNO).  */
char *
target_fileio_readlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first stratum that implements to_fileio_readlink.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_readlink != NULL)
	{
	  char *ret = t->to_fileio_readlink (t, filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_readlink (%s) = %s (%d)\n",
				filename, ret? ret : "(nil)",
				ret? 0 : *target_errno);
	  return ret;
	}
    }

  /* No stratum provides file I/O at all.  */
  *target_errno = FILEIO_ENOSYS;
  return NULL;
}
3107
/* Cleanup callback: close the target file descriptor pointed to by
   OPAQUE, ignoring any error.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int target_errno;
  int fd = *(int *) opaque;

  target_fileio_close (fd, &target_errno);
}
3116
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Ensure the fd is closed on any exit path, including errors
     thrown from within the loop.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Reserve PADDING bytes at the end of the buffer for the
	 caller (e.g. for a terminating NUL).  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
3180
3181 /* Read target file FILENAME. Store the result in *BUF_P and return
3182 the size of the transferred data. See the declaration in "target.h"
3183 function for more information about the return value. */
3184
3185 LONGEST
3186 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3187 {
3188 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3189 }
3190
/* Read target file FILENAME.  The result is NUL-terminated and
   returned as a string, allocated using xmalloc.  If an error occurs
   or the transfer is unsupported, NULL is returned.  Empty objects
   are returned as allocated but empty strings.  A warning is issued
   if the result contains any embedded NUL bytes.  */

char *
target_fileio_read_stralloc (const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Request one byte of padding for the terminating NUL below.  */
  transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
  bufstr = (char *) buffer;

  /* Error: nothing was allocated for us.  */
  if (transferred < 0)
    return NULL;

  /* Empty file: the helper freed its buffer itself.  */
  if (transferred == 0)
    return xstrdup ("");

  /* Safe because of the padding byte requested above.  */
  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target file %s "
		   "contained unexpected null characters"),
		 filename);
	break;
      }

  /* Caller owns the returned string and must xfree it.  */
  return bufstr;
}
3227
3228
3229 static int
3230 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3231 CORE_ADDR addr, int len)
3232 {
3233 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3234 }
3235
3236 static int
3237 default_watchpoint_addr_within_range (struct target_ops *target,
3238 CORE_ADDR addr,
3239 CORE_ADDR start, int length)
3240 {
3241 return addr >= start && addr < start + length;
3242 }
3243
3244 static struct gdbarch *
3245 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3246 {
3247 return target_gdbarch ();
3248 }
3249
/* Trivial target-vector method that always answers 0 ("no").  */

static int
return_zero (struct target_ops *ignore)
{
  return 0;
}
3255
3256 static int
3257 return_zero_has_execution (struct target_ops *ignore, ptid_t ignore2)
3258 {
3259 return 0;
3260 }
3261
3262 /*
3263 * Find the next target down the stack from the specified target.
3264 */
3265
3266 struct target_ops *
3267 find_target_beneath (struct target_ops *t)
3268 {
3269 return t->beneath;
3270 }
3271
3272 /* See target.h. */
3273
3274 struct target_ops *
3275 find_target_at (enum strata stratum)
3276 {
3277 struct target_ops *t;
3278
3279 for (t = current_target.beneath; t != NULL; t = t->beneath)
3280 if (t->to_stratum == stratum)
3281 return t;
3282
3283 return NULL;
3284 }
3285
3286 \f
/* The inferior process has died.  Long live the inferior!

   Shared "mourn" implementation: tears down all per-inferior state.
   The order of operations below is deliberate — see the inline
   comments.  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Clear inferior_ptid first so nothing below acts on the dead
     process by accident.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3321 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer, so the result is only valid until the next call.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}
3333
/* Default implementation of to_pid_to_str: delegate to
   normal_pid_to_str, ignoring the target vector.  */

static char *
default_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3339
/* Error-catcher for target_find_memory_regions: installed when no
   real target provides the method.  Always throws; the return
   statement only satisfies the compiler.  */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  return 0;
}
3348
/* Error-catcher for target_make_corefile_notes: installed when no
   real target provides the method.  Always throws; the return
   statement only satisfies the compiler.  */
static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  return NULL;
}
3357
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  The dummy target sits at the bottom of every target stack;
   everything not set here is filled in by install_dummy_methods.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  /* Creating an inferior with no target selects a default target.  */
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_stratum = dummy_stratum;
  /* The dummy target has nothing: no memory, stack, registers, or
     execution.  */
  dummy_target.to_has_all_memory = return_zero;
  dummy_target.to_has_memory = return_zero;
  dummy_target.to_has_stack = return_zero;
  dummy_target.to_has_registers = return_zero;
  dummy_target.to_has_execution = return_zero_has_execution;
  dummy_target.to_magic = OPS_MAGIC;

  install_dummy_methods (&dummy_target);
}
3381 \f
/* Logging wrapper installed by setup_target_debug: forward to the
   saved target's to_open and trace the call to gdb_stdlog.  */
static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}
3389
/* Close TARG, which must already have been popped off the target
   stack.  Prefers to_xclose (which also deallocates TARG) over
   to_close.  */

void
target_close (struct target_ops *targ)
{
  gdb_assert (!target_is_pushed (targ));

  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3403
/* Attach to a process, delegating to the current target vector's
   to_attach method; trace the call when target debugging is on.  */

void
target_attach (char *args, int from_tty)
{
  current_target.to_attach (&current_target, args, from_tty);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
			args, from_tty);
}
3412
3413 int
3414 target_thread_alive (ptid_t ptid)
3415 {
3416 int retval;
3417
3418 retval = current_target.to_thread_alive (&current_target, ptid);
3419 if (targetdebug)
3420 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3421 ptid_get_pid (ptid), retval);
3422
3423 return retval;
3424 }
3425
/* Ask the current target to discover any threads GDB does not yet
   know about, via its to_find_new_threads method.  */

void
target_find_new_threads (void)
{
  current_target.to_find_new_threads (&current_target);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
}
3433
3434 void
3435 target_stop (ptid_t ptid)
3436 {
3437 if (!may_stop)
3438 {
3439 warning (_("May not interrupt or stop the target, ignoring attempt"));
3440 return;
3441 }
3442
3443 (*current_target.to_stop) (&current_target, ptid);
3444 }
3445
/* Logging wrapper: forward to the saved target's to_post_attach and
   trace the call to gdb_stdlog.  */
static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3453
3454 /* Concatenate ELEM to LIST, a comma separate list, and return the
3455 result. The LIST incoming argument is released. */
3456
3457 static char *
3458 str_comma_list_concat_elem (char *list, const char *elem)
3459 {
3460 if (list == NULL)
3461 return xstrdup (elem);
3462 else
3463 return reconcat (list, list, ", ", elem, (char *) NULL);
3464 }
3465
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append OPT_STR (string version of OPT) to RET and
   clear OPT from TARGET_OPTIONS.  Returns the (possibly reallocated)
   resulting string.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  int present = (*target_options & opt) != 0;

  if (present)
    {
      *target_options &= ~opt;
      ret = str_comma_list_concat_elem (ret, opt_str);
    }

  return ret;
}
3483
/* Render TARGET_OPTIONS (a TARGET_* bit mask) as a human-readable,
   comma separated, xmalloc'd string.  Unrecognized bits are reported
   as "unknown???".  NOTE: the DO_TARG_OPTION macro below is not
   #undef'd, so it stays visible for the rest of the file.  */

char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3501
/* Print a target-debug trace line for a register access made by FUNC:
   the register's name (or number), its raw bytes, and — when it fits
   in a LONGEST — its value as address and integer.  */

static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the symbolic register name when REGNO is a valid raw
     register with a non-empty name.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      /* Dump the raw bytes in storage order.  */
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* Also show the numeric value when it fits in a LONGEST.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3538
/* Fetch register REGNO (or all registers if REGNO is -1 — presumably;
   verify against to_fetch_registers implementations) into REGCACHE
   via the current target.  */

void
target_fetch_registers (struct regcache *regcache, int regno)
{
  current_target.to_fetch_registers (&current_target, regcache, regno);
  if (targetdebug)
    debug_print_register ("target_fetch_registers", regcache, regno);
}
3546
3547 void
3548 target_store_registers (struct regcache *regcache, int regno)
3549 {
3550 struct target_ops *t;
3551
3552 if (!may_write_registers)
3553 error (_("Writing to registers is not allowed (regno %d)"), regno);
3554
3555 current_target.to_store_registers (&current_target, regcache, regno);
3556 if (targetdebug)
3557 {
3558 debug_print_register ("target_store_registers", regcache, regno);
3559 }
3560 }
3561
/* Return the CPU core PTID last ran on, via the current target's
   to_core_of_thread method.  */

int
target_core_of_thread (ptid_t ptid)
{
  int retval = current_target.to_core_of_thread (&current_target, ptid);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog,
			"target_core_of_thread (%d) = %d\n",
			ptid_get_pid (ptid), retval);
  return retval;
}
3573
/* Compare SIZE bytes at MEMADDR in target memory against DATA, via
   the current target's to_verify_memory method.  */

int
target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  int retval = current_target.to_verify_memory (&current_target,
						data, memaddr, size);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog,
			"target_verify_memory (%s, %s) = %d\n",
			paddress (target_gdbarch (), memaddr),
			pulongest (size),
			retval);
  return retval;
}
3588
/* The documentation for this function is in its prototype declaration in
   target.h.  Forwards to the current target's to_insert_mask_watchpoint
   and traces the call when target debugging is on.  */

int
target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  int ret;

  ret = current_target.to_insert_mask_watchpoint (&current_target,
						  addr, mask, rw);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "\
target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
			core_addr_to_string (addr),
			core_addr_to_string (mask), rw, ret);

  return ret;
}
3608
/* The documentation for this function is in its prototype declaration in
   target.h.  Forwards to the current target's to_remove_mask_watchpoint
   and traces the call when target debugging is on.  */

int
target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  int ret;

  ret = current_target.to_remove_mask_watchpoint (&current_target,
						  addr, mask, rw);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "\
target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
			core_addr_to_string (addr),
			core_addr_to_string (mask), rw, ret);

  return ret;
}
3628
/* The documentation for this function is in its prototype declaration
   in target.h.  Forwards to to_masked_watch_num_registers.  */

int
target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
{
  return current_target.to_masked_watch_num_registers (&current_target,
						       addr, mask);
}
3638
/* The documentation for this function is in its prototype declaration
   in target.h.  Forwards to to_ranged_break_num_registers.  */

int
target_ranged_break_num_registers (void)
{
  return current_target.to_ranged_break_num_registers (&current_target);
}
3647
/* See target.h.  Enable branch tracing for PTID via the current
   target; returns the new tracing handle.  */

struct btrace_target_info *
target_enable_btrace (ptid_t ptid)
{
  return current_target.to_enable_btrace (&current_target, ptid);
}
3655
/* See target.h.  Disable branch tracing for the thread described by
   BTINFO via the current target.  */

void
target_disable_btrace (struct btrace_target_info *btinfo)
{
  current_target.to_disable_btrace (&current_target, btinfo);
}
3663
/* See target.h.  Tear down branch tracing state described by BTINFO
   via the current target.  */

void
target_teardown_btrace (struct btrace_target_info *btinfo)
{
  current_target.to_teardown_btrace (&current_target, btinfo);
}
3671
/* See target.h.  Read branch trace data of kind TYPE for BTINFO into
   *BTRACE via the current target; returns a btrace_error code.  */

enum btrace_error
target_read_btrace (VEC (btrace_block_s) **btrace,
		    struct btrace_target_info *btinfo,
		    enum btrace_read_type type)
{
  return current_target.to_read_btrace (&current_target, btrace, btinfo, type);
}
3681
/* See target.h.  Forward to the current target's to_stop_recording
   method.  */

void
target_stop_recording (void)
{
  current_target.to_stop_recording (&current_target);
}
3689
/* See target.h.  Find the first target on the stack that implements
   to_info_record and invoke it; complain if none does.  */

void
target_info_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_info_record != NULL)
      {
	t->to_info_record (t);
	return;
      }

  tcomplain ();
}
3706
/* See target.h.  Save recorded execution to FILENAME via the current
   target's to_save_record method.  */

void
target_save_record (const char *filename)
{
  current_target.to_save_record (&current_target, filename);
}
3714
3715 /* See target.h. */
3716
3717 int
3718 target_supports_delete_record (void)
3719 {
3720 struct target_ops *t;
3721
3722 for (t = current_target.beneath; t != NULL; t = t->beneath)
3723 if (t->to_delete_record != NULL)
3724 return 1;
3725
3726 return 0;
3727 }
3728
/* See target.h.  Forward to the current target's to_delete_record
   method.  */

void
target_delete_record (void)
{
  current_target.to_delete_record (&current_target);
}
3736
/* See target.h.  Forward to the current target's
   to_record_is_replaying method.  */

int
target_record_is_replaying (void)
{
  return current_target.to_record_is_replaying (&current_target);
}
3744
/* See target.h.  Forward to the current target's to_goto_record_begin
   method.  */

void
target_goto_record_begin (void)
{
  current_target.to_goto_record_begin (&current_target);
}
3752
/* See target.h.  Forward to the current target's to_goto_record_end
   method.  */

void
target_goto_record_end (void)
{
  current_target.to_goto_record_end (&current_target);
}
3760
/* See target.h.  Jump to recorded instruction number INSN via the
   current target's to_goto_record method.  */

void
target_goto_record (ULONGEST insn)
{
  current_target.to_goto_record (&current_target, insn);
}
3768
/* See target.h.  Forward to the current target's to_insn_history
   method.  */

void
target_insn_history (int size, int flags)
{
  current_target.to_insn_history (&current_target, size, flags);
}
3776
/* See target.h.  Forward to the current target's to_insn_history_from
   method.  */

void
target_insn_history_from (ULONGEST from, int size, int flags)
{
  current_target.to_insn_history_from (&current_target, from, size, flags);
}
3784
/* See target.h.  Forward to the current target's
   to_insn_history_range method.  */

void
target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  current_target.to_insn_history_range (&current_target, begin, end, flags);
}
3792
/* See target.h.  Forward to the current target's to_call_history
   method.  */

void
target_call_history (int size, int flags)
{
  current_target.to_call_history (&current_target, size, flags);
}
3800
/* See target.h.  Forward to the current target's to_call_history_from
   method.  */

void
target_call_history_from (ULONGEST begin, int size, int flags)
{
  current_target.to_call_history_from (&current_target, begin, size, flags);
}
3808
/* See target.h.  Forward to the current target's
   to_call_history_range method.  */

void
target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  current_target.to_call_history_range (&current_target, begin, end, flags);
}
3816
/* Logging wrapper: forward to the saved target's to_prepare_to_store
   and trace the call to gdb_stdlog.  */
static void
debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
{
  debug_target.to_prepare_to_store (&debug_target, regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
3824
3825 /* See target.h. */
3826
3827 const struct frame_unwind *
3828 target_get_unwinder (void)
3829 {
3830 struct target_ops *t;
3831
3832 for (t = current_target.beneath; t != NULL; t = t->beneath)
3833 if (t->to_get_unwinder != NULL)
3834 return t->to_get_unwinder;
3835
3836 return NULL;
3837 }
3838
3839 /* See target.h. */
3840
3841 const struct frame_unwind *
3842 target_get_tailcall_unwinder (void)
3843 {
3844 struct target_ops *t;
3845
3846 for (t = current_target.beneath; t != NULL; t = t->beneath)
3847 if (t->to_get_tailcall_unwinder != NULL)
3848 return t->to_get_tailcall_unwinder;
3849
3850 return NULL;
3851 }
3852
/* See target.h.  Starting at OPS, find the first target that
   overrides to_decr_pc_after_break and use it; otherwise fall back to
   the architecture's default.  */

CORE_ADDR
forward_target_decr_pc_after_break (struct target_ops *ops,
				    struct gdbarch *gdbarch)
{
  for (; ops != NULL; ops = ops->beneath)
    if (ops->to_decr_pc_after_break != NULL)
      return ops->to_decr_pc_after_break (ops, gdbarch);

  return gdbarch_decr_pc_after_break (gdbarch);
}
3865
/* See target.h.  Convenience wrapper starting the search beneath
   current_target.  */

CORE_ADDR
target_decr_pc_after_break (struct gdbarch *gdbarch)
{
  return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
}
3873
/* Logging wrapper for the deprecated_xfer_memory hook: forward to the
   saved target and dump the transfer (and, for short transfers, the
   bytes themselves) to gdb_stdlog.  */
static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Start a fresh output line every 16 bytes (whenever the
	     buffer address is 16-byte aligned); with "set debug
	     target" below 2, elide everything after the first row.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
3914
/* Logging wrapper for to_files_info.  NOTE(review): unlike the other
   debug wrappers this forwards the incoming TARGET rather than
   &debug_target — confirm that is intentional.  */
static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
3922
/* Logging wrapper: forward to the saved target's to_insert_breakpoint
   and trace the placed address and result.  */
static int
debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
3937
/* Logging wrapper: forward to the saved target's to_remove_breakpoint
   and trace the placed address and result.  */
static int
debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
3952
/* Logging wrapper: forward to the saved target's
   to_can_use_hw_breakpoint and trace arguments and result.  */
static int
debug_to_can_use_hw_breakpoint (struct target_ops *self,
				int type, int cnt, int from_tty)
{
  int retval;

  retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
						  type, cnt, from_tty);

  fprintf_unfiltered (gdb_stdlog,
		      "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
		      (unsigned long) type,
		      (unsigned long) cnt,
		      (unsigned long) from_tty,
		      (unsigned long) retval);
  return retval;
}
3970
3971 static int
3972 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
3973 CORE_ADDR addr, int len)
3974 {
3975 CORE_ADDR retval;
3976
3977 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
3978 addr, len);
3979
3980 fprintf_unfiltered (gdb_stdlog,
3981 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
3982 core_addr_to_string (addr), (unsigned long) len,
3983 core_addr_to_string (retval));
3984 return retval;
3985 }
3986
/* Logging wrapper: forward to the saved target's
   to_can_accel_watchpoint_condition and trace arguments/result.  */
static int
debug_to_can_accel_watchpoint_condition (struct target_ops *self,
					 CORE_ADDR addr, int len, int rw,
					 struct expression *cond)
{
  int retval;

  retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
							   addr, len,
							   rw, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_can_accel_watchpoint_condition "
		      "(%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, rw,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4005
/* Logging wrapper: forward to the saved target's
   to_stopped_by_watchpoint and trace the result.  */
static int
debug_to_stopped_by_watchpoint (struct target_ops *ops)
{
  int retval;

  retval = debug_target.to_stopped_by_watchpoint (&debug_target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_by_watchpoint () = %ld\n",
		      (unsigned long) retval);
  return retval;
}
4018
/* Logging wrapper: forward to TARGET's to_stopped_data_address and
   trace the result.  NOTE(review): *ADDR is printed even when the
   hook returned 0, in which case it may be uninitialized — confirm
   the hook's contract.  */
static int
debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
{
  int retval;

  retval = debug_target.to_stopped_data_address (target, addr);

  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_data_address ([%s]) = %ld\n",
		      core_addr_to_string (*addr),
		      (unsigned long)retval);
  return retval;
}
4032
4033 static int
4034 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4035 CORE_ADDR addr,
4036 CORE_ADDR start, int length)
4037 {
4038 int retval;
4039
4040 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4041 start, length);
4042
4043 fprintf_filtered (gdb_stdlog,
4044 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4045 core_addr_to_string (addr), core_addr_to_string (start),
4046 length, retval);
4047 return retval;
4048 }
4049
/* Logging wrapper: forward to the saved target's
   to_insert_hw_breakpoint and trace the placed address and result.  */
static int
debug_to_insert_hw_breakpoint (struct target_ops *self,
			       struct gdbarch *gdbarch,
			       struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_hw_breakpoint (&debug_target,
						 gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4066
/* Logging wrapper: forward to the saved target's
   to_remove_hw_breakpoint and trace the placed address and result.  */
static int
debug_to_remove_hw_breakpoint (struct target_ops *self,
			       struct gdbarch *gdbarch,
			       struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_hw_breakpoint (&debug_target,
						 gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4083
/* Logging wrapper: forward to the saved target's to_insert_watchpoint
   and trace arguments and result.  */
static int
debug_to_insert_watchpoint (struct target_ops *self,
			    CORE_ADDR addr, int len, int type,
			    struct expression *cond)
{
  int retval;

  retval = debug_target.to_insert_watchpoint (&debug_target,
					      addr, len, type, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, type,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4100
/* Logging wrapper: forward to the saved target's to_remove_watchpoint
   and trace arguments and result.  */
static int
debug_to_remove_watchpoint (struct target_ops *self,
			    CORE_ADDR addr, int len, int type,
			    struct expression *cond)
{
  int retval;

  retval = debug_target.to_remove_watchpoint (&debug_target,
					      addr, len, type, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, type,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4117
/* Logging wrapper: forward to the saved target's to_terminal_init.  */
static void
debug_to_terminal_init (struct target_ops *self)
{
  debug_target.to_terminal_init (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
4125
/* Logging wrapper: forward to the saved target's
   to_terminal_inferior.  */
static void
debug_to_terminal_inferior (struct target_ops *self)
{
  debug_target.to_terminal_inferior (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
4133
/* Logging wrapper: forward to the saved target's
   to_terminal_ours_for_output.  */
static void
debug_to_terminal_ours_for_output (struct target_ops *self)
{
  debug_target.to_terminal_ours_for_output (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}
4141
/* Logging wrapper: forward to the saved target's to_terminal_ours.  */
static void
debug_to_terminal_ours (struct target_ops *self)
{
  debug_target.to_terminal_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}
4149
/* Logging wrapper: forward to the saved target's
   to_terminal_save_ours.  */
static void
debug_to_terminal_save_ours (struct target_ops *self)
{
  debug_target.to_terminal_save_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}
4157
/* Logging wrapper: forward to the saved target's to_terminal_info and
   trace the arguments.  */
static void
debug_to_terminal_info (struct target_ops *self,
			const char *arg, int from_tty)
{
  debug_target.to_terminal_info (&debug_target, arg, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
		      from_tty);
}
4167
/* Logging wrapper: forward to the saved target's to_load and trace
   the arguments.  */
static void
debug_to_load (struct target_ops *self, char *args, int from_tty)
{
  debug_target.to_load (&debug_target, args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
4175
/* Logging wrapper: forward to the saved target's
   to_post_startup_inferior and trace the pid.  */
static void
debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_post_startup_inferior (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      ptid_get_pid (ptid));
}
4184
/* Logging wrapper: forward to the saved target's
   to_insert_fork_catchpoint and trace pid and result.  */
static int
debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4197
/* Logging wrapper: forward to the saved target's
   to_remove_fork_catchpoint and trace pid and result.  */
static int
debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4210
/* Logging wrapper: forward to the saved target's
   to_insert_vfork_catchpoint and trace pid and result.  */
static int
debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4223
/* Logging wrapper: forward to the saved target's
   to_remove_vfork_catchpoint and trace pid and result.  */
static int
debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4236
/* Logging wrapper: forward to the saved target's
   to_insert_exec_catchpoint and trace pid and result.  */
static int
debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4249
/* Logging wrapper: forward to the saved target's
   to_remove_exec_catchpoint and trace pid and result.  */
static int
debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4262
/* Logging wrapper: forward to the saved target's to_has_exited and
   trace arguments and result.  */
static int
debug_to_has_exited (struct target_ops *self,
		     int pid, int wait_status, int *exit_status)
{
  int has_exited;

  has_exited = debug_target.to_has_exited (&debug_target,
					   pid, wait_status, exit_status);

  fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
		      pid, wait_status, *exit_status, has_exited);

  return has_exited;
}
4277
/* Logging wrapper: forward to the saved target's to_can_run and trace
   the result.  */
static int
debug_to_can_run (struct target_ops *self)
{
  int retval;

  retval = debug_target.to_can_run (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);

  return retval;
}
4289
/* Logging wrapper: forward OPS to the saved target's
   to_thread_architecture and trace the resulting gdbarch.  */
static struct gdbarch *
debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  struct gdbarch *retval;

  retval = debug_target.to_thread_architecture (ops, ptid);

  fprintf_unfiltered (gdb_stdlog,
		      "target_thread_architecture (%s) = %s [%s]\n",
		      target_pid_to_str (ptid),
		      host_address_to_string (retval),
		      gdbarch_bfd_arch_info (retval)->printable_name);
  return retval;
}
4304
/* Logging wrapper: forward to the saved target's to_stop and trace
   the ptid.  */
static void
debug_to_stop (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_stop (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
		      target_pid_to_str (ptid));
}
4313
/* Logging wrapper: forward to the saved target's to_rcmd and trace
   the command string.  */
static void
debug_to_rcmd (struct target_ops *self, char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (&debug_target, command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
4321
/* Logging wrapper: forward to the saved target's to_pid_to_exec_file
   and trace pid and resulting path.  */
static char *
debug_to_pid_to_exec_file (struct target_ops *self, int pid)
{
  char *exec_file;

  exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
		      pid, exec_file);

  return exec_file;
}
4334
/* Turn on target debugging: snapshot the current target vector into
   debug_target, then replace selected current_target methods with the
   debug_to_* logging wrappers above, which forward to the snapshot
   and print a trace line to gdb_stdlog.  */

static void
setup_target_debug (void)
{
  /* Keep the real methods so the wrappers can still call them.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4382 \f
4383
/* Help text used when registering the "target"-related commands.  */
static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4388
/* Default implementation of to_rcmd: the "monitor" command is only
   meaningful for targets that implement it, so error out.  */
static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
4394
/* Implementation of the "monitor" CLI command: pass CMD through to
   the target, sending its output to gdb_stdtarg.  */
static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  target_rcmd (cmd, gdb_stdtarg);
}
4401
4402 /* Print the name of each layers of our target stack. */
4403
4404 static void
4405 maintenance_print_target_stack (char *cmd, int from_tty)
4406 {
4407 struct target_ops *t;
4408
4409 printf_filtered (_("The current target stack is:\n"));
4410
4411 for (t = target_stack; t != NULL; t = t->beneath)
4412 {
4413 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4414 }
4415 }
4416
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated until the
   inferior stops (see set_target_async_command below).  */
static int target_async_permitted_1 = 0;
4423
/* Implement "set target-async".  The set machinery has already
   written the new value into target_async_permitted_1; commit it to
   target_async_permitted, unless the inferior is running, in which
   case revert the staged copy and raise an error.  ARGS, FROM_TTY and
   C are unused.  */

static void
set_target_async_command (char *args, int from_tty,
                          struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Undo the user's edit of the staged copy first, so that a
	 subsequent "show target-async" still reflects the setting
	 actually in effect, then refuse the change.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
4436
/* Implement "show target-async": report VALUE (the printable form of
   the setting) on FILE.  FROM_TTY and C are unused.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
                           struct cmd_list_element *c,
                           const char *value)
{
  fprintf_filtered (file,
                    _("Controlling the inferior in asynchronous mode is %s.\n"),
                    value);
}
4446
/* Temporary copies of permission settings.  The "set may-*" commands
   write here; the values are copied into the live may_* flags by
   set_target_permissions / set_write_memory_permission, and copied
   back from the live flags by update_target_permissions.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4455
/* Make the user-set values match the real values again.  Used to
   revert the staged *_1 copies when a permission change is refused
   (see set_target_permissions).  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
4468
/* The one function handles (most of) the permission flags in the same
   way.  If the inferior is running the change is refused and the
   staged copies are reverted; otherwise the staged values are
   committed and observer mode is recomputed.  ARGS, FROM_TTY and C
   are unused.  */

static void
set_target_permissions (char *args, int from_tty,
                        struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Revert the staged *_1 copies so "show" output stays truthful,
	 then refuse the change.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  Note that
     may_write_memory is intentionally not copied here: it is handled
     separately by set_write_memory_permission, independently of
     observer mode.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
4490
/* Set memory write permission independently of observer mode.
   Commits the staged may_write_memory_1 value unconditionally (even
   while the inferior is running), then recomputes observer mode.
   ARGS, FROM_TTY and C are unused.  */

static void
set_write_memory_permission (char *args, int from_tty,
                             struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
4501
4502
/* Set up the target-related commands and settings: the initial dummy
   target, the "info target"/"info files" commands, target debugging,
   the "monitor" command, and the user-settable "may-*" permission
   flags.  Called once during GDB startup.  */

void
initialize_targets (void)
{
  /* Install the dummy target as the bottom layer of the target
     stack.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are synonyms; both share
     targ_desc.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  /* "set target-async" writes into target_async_permitted_1; the
     set hook commits it (see set_target_async_command).  */
  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* The "may-*" permission settings below all stage into their *_1
     copies and are committed by set_target_permissions, except
     "may-write-memory", which uses set_write_memory_permission so it
     can be changed independently of observer mode.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}