]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/target.c
Fix length arg in call to breakpoint_xfer_memory.
[thirdparty/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47 #include "auxv.h"
48
49 static void target_info (char *, int);
50
51 static void default_terminal_info (struct target_ops *, const char *, int);
52
53 static int default_watchpoint_addr_within_range (struct target_ops *,
54 CORE_ADDR, CORE_ADDR, int);
55
56 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
57 CORE_ADDR, int);
58
59 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
60
61 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
62 long lwp, long tid);
63
64 static int default_follow_fork (struct target_ops *self, int follow_child,
65 int detach_fork);
66
67 static void default_mourn_inferior (struct target_ops *self);
68
69 static int default_search_memory (struct target_ops *ops,
70 CORE_ADDR start_addr,
71 ULONGEST search_space_len,
72 const gdb_byte *pattern,
73 ULONGEST pattern_len,
74 CORE_ADDR *found_addrp);
75
76 static void tcomplain (void) ATTRIBUTE_NORETURN;
77
78 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
79
80 static int return_zero (struct target_ops *);
81
82 static int return_zero_has_execution (struct target_ops *, ptid_t);
83
84 void target_ignore (void);
85
86 static void target_command (char *, int);
87
88 static struct target_ops *find_default_run_target (char *);
89
90 static target_xfer_partial_ftype default_xfer_partial;
91
92 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
93 ptid_t ptid);
94
95 static int dummy_find_memory_regions (struct target_ops *self,
96 find_memory_region_ftype ignore1,
97 void *ignore2);
98
99 static char *dummy_make_corefile_notes (struct target_ops *self,
100 bfd *ignore1, int *ignore2);
101
102 static char *default_pid_to_str (struct target_ops *ops, ptid_t ptid);
103
104 static int find_default_can_async_p (struct target_ops *ignore);
105
106 static int find_default_is_async_p (struct target_ops *ignore);
107
108 static enum exec_direction_kind default_execution_direction
109 (struct target_ops *self);
110
111 static CORE_ADDR default_target_decr_pc_after_break (struct target_ops *ops,
112 struct gdbarch *gdbarch);
113
114 #include "target-delegates.c"
115
116 static void init_dummy_target (void);
117
118 static struct target_ops debug_target;
119
120 static void debug_to_open (char *, int);
121
122 static void debug_to_prepare_to_store (struct target_ops *self,
123 struct regcache *);
124
125 static void debug_to_files_info (struct target_ops *);
126
127 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
128 struct bp_target_info *);
129
130 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
131 struct bp_target_info *);
132
133 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
134 int, int, int);
135
136 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
137 struct gdbarch *,
138 struct bp_target_info *);
139
140 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
141 struct gdbarch *,
142 struct bp_target_info *);
143
144 static int debug_to_insert_watchpoint (struct target_ops *self,
145 CORE_ADDR, int, int,
146 struct expression *);
147
148 static int debug_to_remove_watchpoint (struct target_ops *self,
149 CORE_ADDR, int, int,
150 struct expression *);
151
152 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
153
154 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
155 CORE_ADDR, CORE_ADDR, int);
156
157 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
158 CORE_ADDR, int);
159
160 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
161 CORE_ADDR, int, int,
162 struct expression *);
163
164 static void debug_to_terminal_init (struct target_ops *self);
165
166 static void debug_to_terminal_inferior (struct target_ops *self);
167
168 static void debug_to_terminal_ours_for_output (struct target_ops *self);
169
170 static void debug_to_terminal_save_ours (struct target_ops *self);
171
172 static void debug_to_terminal_ours (struct target_ops *self);
173
174 static void debug_to_load (struct target_ops *self, char *, int);
175
176 static int debug_to_can_run (struct target_ops *self);
177
178 static void debug_to_stop (struct target_ops *self, ptid_t);
179
180 /* Pointer to array of target architecture structures; the size of the
181 array; the current index into the array; the allocated size of the
182 array. */
183 struct target_ops **target_structs;
184 unsigned target_struct_size;
185 unsigned target_struct_allocsize;
186 #define DEFAULT_ALLOCSIZE 10
187
188 /* The initial current target, so that there is always a semi-valid
189 current target. */
190
191 static struct target_ops dummy_target;
192
193 /* Top of target stack. */
194
195 static struct target_ops *target_stack;
196
197 /* The target structure we are currently using to talk to a process
198 or file or whatever "inferior" we have. */
199
200 struct target_ops current_target;
201
202 /* Command list for target. */
203
204 static struct cmd_list_element *targetlist = NULL;
205
206 /* Nonzero if we should trust readonly sections from the
207 executable when reading memory. */
208
209 static int trust_readonly = 0;
210
211 /* Nonzero if we should show true memory content including
212 memory breakpoint inserted by gdb. */
213
214 static int show_memory_breakpoints = 0;
215
216 /* These globals control whether GDB attempts to perform these
217 operations; they are useful for targets that need to prevent
218 inadvertant disruption, such as in non-stop mode. */
219
220 int may_write_registers = 1;
221
222 int may_write_memory = 1;
223
224 int may_insert_breakpoints = 1;
225
226 int may_insert_tracepoints = 1;
227
228 int may_insert_fast_tracepoints = 1;
229
230 int may_stop = 1;
231
232 /* Non-zero if we want to see trace of target level stuff. */
233
234 static unsigned int targetdebug = 0;
/* Callback for "show debug target": print the current value of the
   targetdebug setting to FILE.  */
static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
241
242 static void setup_target_debug (void);
243
244 /* The user just typed 'target' without the name of a target. */
245
static void
target_command (char *arg, int from_tty)
{
  /* Bare "target" is an error: the user must name a target type.  */
  fputs_filtered ("Argument required (target name). Try `help target'\n",
		  gdb_stdout);
}
252
253 /* Default target_has_* methods for process_stratum targets. */
254
255 int
256 default_child_has_all_memory (struct target_ops *ops)
257 {
258 /* If no inferior selected, then we can't read memory here. */
259 if (ptid_equal (inferior_ptid, null_ptid))
260 return 0;
261
262 return 1;
263 }
264
265 int
266 default_child_has_memory (struct target_ops *ops)
267 {
268 /* If no inferior selected, then we can't read memory here. */
269 if (ptid_equal (inferior_ptid, null_ptid))
270 return 0;
271
272 return 1;
273 }
274
275 int
276 default_child_has_stack (struct target_ops *ops)
277 {
278 /* If no inferior selected, there's no stack. */
279 if (ptid_equal (inferior_ptid, null_ptid))
280 return 0;
281
282 return 1;
283 }
284
285 int
286 default_child_has_registers (struct target_ops *ops)
287 {
288 /* Can't read registers from no inferior. */
289 if (ptid_equal (inferior_ptid, null_ptid))
290 return 0;
291
292 return 1;
293 }
294
295 int
296 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
297 {
298 /* If there's no thread selected, then we can't make it run through
299 hoops. */
300 if (ptid_equal (the_ptid, null_ptid))
301 return 0;
302
303 return 1;
304 }
305
306
307 int
308 target_has_all_memory_1 (void)
309 {
310 struct target_ops *t;
311
312 for (t = current_target.beneath; t != NULL; t = t->beneath)
313 if (t->to_has_all_memory (t))
314 return 1;
315
316 return 0;
317 }
318
319 int
320 target_has_memory_1 (void)
321 {
322 struct target_ops *t;
323
324 for (t = current_target.beneath; t != NULL; t = t->beneath)
325 if (t->to_has_memory (t))
326 return 1;
327
328 return 0;
329 }
330
331 int
332 target_has_stack_1 (void)
333 {
334 struct target_ops *t;
335
336 for (t = current_target.beneath; t != NULL; t = t->beneath)
337 if (t->to_has_stack (t))
338 return 1;
339
340 return 0;
341 }
342
343 int
344 target_has_registers_1 (void)
345 {
346 struct target_ops *t;
347
348 for (t = current_target.beneath; t != NULL; t = t->beneath)
349 if (t->to_has_registers (t))
350 return 1;
351
352 return 0;
353 }
354
355 int
356 target_has_execution_1 (ptid_t the_ptid)
357 {
358 struct target_ops *t;
359
360 for (t = current_target.beneath; t != NULL; t = t->beneath)
361 if (t->to_has_execution (t, the_ptid))
362 return 1;
363
364 return 0;
365 }
366
/* Return non-zero if the currently selected inferior (inferior_ptid)
   has execution.  */

int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
372
373 /* Complete initialization of T. This ensures that various fields in
374 T are set, if needed by the target implementation. */
375
376 void
377 complete_target_initialization (struct target_ops *t)
378 {
379 /* Provide default values for all "must have" methods. */
380 if (t->to_xfer_partial == NULL)
381 t->to_xfer_partial = default_xfer_partial;
382
383 if (t->to_has_all_memory == NULL)
384 t->to_has_all_memory = return_zero;
385
386 if (t->to_has_memory == NULL)
387 t->to_has_memory = return_zero;
388
389 if (t->to_has_stack == NULL)
390 t->to_has_stack = return_zero;
391
392 if (t->to_has_registers == NULL)
393 t->to_has_registers = return_zero;
394
395 if (t->to_has_execution == NULL)
396 t->to_has_execution = return_zero_has_execution;
397
398 install_delegators (t);
399 }
400
401 /* Add possible target architecture T to the list and add a new
402 command 'target T->to_shortname'. Set COMPLETER as the command's
403 completer if not NULL. */
404
void
add_target_with_completer (struct target_ops *t,
			   completer_ftype *completer)
{
  struct cmd_list_element *c;

  /* Fill in any missing mandatory methods before exposing T.  */
  complete_target_initialization (t);

  /* Grow the registry of known targets, doubling capacity as needed.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* Lazily create the "target" prefix command the first time any
     target is registered.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol. For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
	       &targetlist);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
441
442 /* Add a possible target architecture to the list. */
443
void
add_target (struct target_ops *t)
{
  /* Register T with no command-line completer.  */
  add_target_with_completer (t, NULL);
}
449
450 /* See target.h. */
451
void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  /* ALT is not freed here; presumably deprecate_cmd retains it for the
     lifetime of the command -- verify against deprecate_cmd.  */
  alt = xstrprintf ("target %s", t->to_shortname);
  deprecate_cmd (c, alt);
}
464
465 /* Stub functions */
466
void
target_ignore (void)
{
  /* Deliberately empty: installed where a no-op target method is
     needed.  */
}
471
/* Kill the inferior via the current target's to_kill method.  */

void
target_kill (void)
{
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");

  current_target.to_kill (&current_target);
}
480
/* Load a program into the target via its to_load method.  */

void
target_load (char *arg, int from_tty)
{
  /* Loading rewrites target memory, so any cached memory is stale.  */
  target_dcache_invalidate ();
  (*current_target.to_load) (&current_target, arg, from_tty);
}
487
488 void
489 target_create_inferior (char *exec_file, char *args,
490 char **env, int from_tty)
491 {
492 struct target_ops *t;
493
494 for (t = current_target.beneath; t != NULL; t = t->beneath)
495 {
496 if (t->to_create_inferior != NULL)
497 {
498 t->to_create_inferior (t, exec_file, args, env, from_tty);
499 if (targetdebug)
500 fprintf_unfiltered (gdb_stdlog,
501 "target_create_inferior (%s, %s, xxx, %d)\n",
502 exec_file, args, from_tty);
503 return;
504 }
505 }
506
507 internal_error (__FILE__, __LINE__,
508 _("could not find a target to create inferior"));
509 }
510
/* Give the inferior ownership of the terminal, unless a background
   resume makes that unnecessary.  */

void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal. Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet. However, if sync_execution
     is not set, we know it will become async prior to resume. */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes. */
  (*current_target.to_terminal_inferior) (&current_target);
}
525
/* Default deprecated_xfer_memory implementation that always fails:
   sets errno to EIO and reports zero bytes handled.  */

static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
533
/* Report (via error, which does not return) that the current target
   cannot perform the requested operation.  */

static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
540
/* Report that the requested operation needs a live process and none
   is being debugged.  Does not return.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
546
/* Default to_terminal_info implementation: there is no saved terminal
   state to describe.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
552
553 /* A default implementation for the to_get_ada_task_ptid target method.
554
555 This function builds the PTID by using both LWP and TID as part of
556 the PTID lwp and tid elements. The pid used is the pid of the
557 inferior_ptid. */
558
static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  /* Combine the current inferior's pid with the caller's LWP/TID.  */
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
564
565 static enum exec_direction_kind
566 default_execution_direction (struct target_ops *self)
567 {
568 if (!target_can_execute_reverse)
569 return EXEC_FORWARD;
570 else if (!target_can_async_p ())
571 return EXEC_FORWARD;
572 else
573 gdb_assert_not_reached ("\
574 to_execution_direction must be implemented for reverse async");
575 }
576
577 /* Go through the target stack from top to bottom, copying over zero
578 entries in current_target, then filling in still empty entries. In
579 effect, we are doing class inheritance through the pushed target
580 vectors.
581
582 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
583 is currently implemented, is that it discards any knowledge of
584 which target an inherited method originally belonged to.
585 Consequently, new new target methods should instead explicitly and
586 locally search the target stack for the target that can handle the
587 request. */
588
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* current_target reports the stratum of the topmost target.  */
  current_target.to_stratum = target_stack->to_stratum;

  /* Copy FIELD from TARGET only if no higher target set it already,
     so the topmost provider of each field wins.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  /* Do not add any new INHERITs here.  Instead, use the delegation
     mechanism provided by make-target-delegates.  */
  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_attach_no_wait, t);
      INHERIT (deprecated_xfer_memory, t);
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      INHERIT (to_has_thread_control, t);
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Do not add any new de_faults here.  Instead, use the
     delegation mechanism provided by make-target-delegates.  */

#define de_fault(field, value) \
    if (!current_target.field)	       \
      current_target.field = value

  /* Fall back to the always-failing nomemory if nothing on the stack
     provided deprecated_xfer_memory.  */
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
643
644 /* Push a new target type into the stack of the existing target accessors,
645 possibly superseding some of the existing accessors.
646
647 Rather than allow an empty stack, we always have the dummy target at
648 the bottom stratum, so we can call the function vectors without
649 checking them. */
650
651 void
652 push_target (struct target_ops *t)
653 {
654 struct target_ops **cur;
655
656 /* Check magic number. If wrong, it probably means someone changed
657 the struct definition, but not all the places that initialize one. */
658 if (t->to_magic != OPS_MAGIC)
659 {
660 fprintf_unfiltered (gdb_stderr,
661 "Magic number of %s target struct wrong\n",
662 t->to_shortname);
663 internal_error (__FILE__, __LINE__,
664 _("failed internal consistency check"));
665 }
666
667 /* Find the proper stratum to install this target in. */
668 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
669 {
670 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
671 break;
672 }
673
674 /* If there's already targets at this stratum, remove them. */
675 /* FIXME: cagney/2003-10-15: I think this should be popping all
676 targets to CUR, and not just those at this stratum level. */
677 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
678 {
679 /* There's already something at this stratum level. Close it,
680 and un-hook it from the stack. */
681 struct target_ops *tmp = (*cur);
682
683 (*cur) = (*cur)->beneath;
684 tmp->beneath = NULL;
685 target_close (tmp);
686 }
687
688 /* We have removed all targets in our stratum, now add the new one. */
689 t->beneath = (*cur);
690 (*cur) = t;
691
692 update_current_target ();
693 }
694
695 /* Remove a target_ops vector from the stack, wherever it may be.
696 Return how many times it was removed (0 or 1). */
697
698 int
699 unpush_target (struct target_ops *t)
700 {
701 struct target_ops **cur;
702 struct target_ops *tmp;
703
704 if (t->to_stratum == dummy_stratum)
705 internal_error (__FILE__, __LINE__,
706 _("Attempt to unpush the dummy target"));
707
708 /* Look for the specified target. Note that we assume that a target
709 can only occur once in the target stack. */
710
711 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
712 {
713 if ((*cur) == t)
714 break;
715 }
716
717 /* If we don't find target_ops, quit. Only open targets should be
718 closed. */
719 if ((*cur) == NULL)
720 return 0;
721
722 /* Unchain the target. */
723 tmp = (*cur);
724 (*cur) = (*cur)->beneath;
725 tmp->beneath = NULL;
726
727 update_current_target ();
728
729 /* Finally close the target. Note we do this after unchaining, so
730 any target method calls from within the target_close
731 implementation don't end up in T anymore. */
732 target_close (t);
733
734 return 1;
735 }
736
737 void
738 pop_all_targets_above (enum strata above_stratum)
739 {
740 while ((int) (current_target.to_stratum) > (int) above_stratum)
741 {
742 if (!unpush_target (target_stack))
743 {
744 fprintf_unfiltered (gdb_stderr,
745 "pop_all_targets couldn't find target %s\n",
746 target_stack->to_shortname);
747 internal_error (__FILE__, __LINE__,
748 _("failed internal consistency check"));
749 break;
750 }
751 }
752 }
753
/* Pop every target above the bottom (dummy) stratum.  */

void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
759
760 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
761
762 int
763 target_is_pushed (struct target_ops *t)
764 {
765 struct target_ops **cur;
766
767 /* Check magic number. If wrong, it probably means someone changed
768 the struct definition, but not all the places that initialize one. */
769 if (t->to_magic != OPS_MAGIC)
770 {
771 fprintf_unfiltered (gdb_stderr,
772 "Magic number of %s target struct wrong\n",
773 t->to_shortname);
774 internal_error (__FILE__, __LINE__,
775 _("failed internal consistency check"));
776 }
777
778 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
779 if (*cur == t)
780 return 1;
781
782 return 0;
783 }
784
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  Throws an
   error (with a TLS-specific message where possible) on failure.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* volatile so the value written inside TRY_CATCH survives the
     longjmp-based exception paths.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target beneath us that can resolve thread-local
     addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
	 throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
881
882 const char *
883 target_xfer_status_to_string (enum target_xfer_status err)
884 {
885 #define CASE(X) case X: return #X
886 switch (err)
887 {
888 CASE(TARGET_XFER_E_IO);
889 CASE(TARGET_XFER_E_UNAVAILABLE);
890 default:
891 return "<unknown>";
892 }
893 #undef CASE
894 };
895
896
897 #undef MIN
898 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
899
900 /* target_read_string -- read a null terminated string, up to LEN bytes,
901 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
902 Set *STRING to a pointer to malloc'd memory containing the data; the caller
903 is responsible for freeing it. Return the number of bytes successfully
904 read. */
905
/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if
   successful.  Set *STRING to a pointer to malloc'd memory containing
   the data; the caller is responsible for freeing it (even on error).
   Return the number of bytes successfully read.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read up to the next 4-byte boundary so each target read stays
	 within one aligned word.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the output buffer (doubling) if this chunk won't fit.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes out, stopping after (and including) a NUL.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
done:
  /* BUFFER is handed to the caller even when an error cut the read
     short; *ERRNOP tells them what happened.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
972
/* Return TARGET's section table via its to_get_section_table method.  */

struct target_section_table *
target_get_section_table (struct target_ops *target)
{
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");

  return (*target->to_get_section_table) (target);
}
981
982 /* Find a section containing ADDR. */
983
984 struct target_section *
985 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
986 {
987 struct target_section_table *table = target_get_section_table (target);
988 struct target_section *secp;
989
990 if (table == NULL)
991 return NULL;
992
993 for (secp = table->sections; secp < table->sections_end; secp++)
994 {
995 if (addr >= secp->addr && addr < secp->endaddr)
996 return secp;
997 }
998 return NULL;
999 }
1000
1001 /* Read memory from the live target, even if currently inspecting a
1002 traceframe. The return is the same as that of target_read. */
1003
/* Read memory from the live target, even if currently inspecting a
   traceframe.  The return is the same as that of target_read.  */

static enum target_xfer_status
target_read_live_memory (enum target_object object,
			 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
			 ULONGEST *xfered_len)
{
  enum target_xfer_status ret;
  struct cleanup *cleanup;

  /* Switch momentarily out of tfind mode so to access live memory.
     Note that this must not clear global state, such as the frame
     cache, which must still remain valid for the previous traceframe.
     We may be _building_ the frame cache at this point.  */
  cleanup = make_cleanup_restore_traceframe_number ();
  set_traceframe_number (-1);

  /* Start the transfer beneath current_target so the live target
     handles it.  */
  ret = target_xfer_partial (current_target.beneath, object, NULL,
			     myaddr, NULL, memaddr, len, xfered_len);

  /* Restore the previously selected traceframe.  */
  do_cleanups (cleanup);
  return ret;
}
1025
1026 /* Using the set of read-only target sections of OPS, read live
1027 read-only memory. Note that the actual reads start from the
1028 top-most target again.
1029
1030 For interface/parameters/return description see target.h,
1031 to_xfer_partial. */
1032
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed if MEMADDR falls in a SEC_READONLY section.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  /* No read-only section covers MEMADDR: nothing transferred.  */
  return TARGET_XFER_EOF;
}
1081
1082 /* Read memory from more than one valid target. A core file, for
1083 instance, could have some of memory but delegate other bits to
1084 the target below it. So, we must manually try all targets. */
1085
1086 static enum target_xfer_status
1087 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1088 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1089 ULONGEST *xfered_len)
1090 {
1091 enum target_xfer_status res;
1092
1093 do
1094 {
1095 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1096 readbuf, writebuf, memaddr, len,
1097 xfered_len);
1098 if (res == TARGET_XFER_OK)
1099 break;
1100
1101 /* Stop if the target reports that the memory is not available. */
1102 if (res == TARGET_XFER_E_UNAVAILABLE)
1103 break;
1104
1105 /* We want to continue past core files to executables, but not
1106 past a running target's memory. */
1107 if (ops->to_has_all_memory (ops))
1108 break;
1109
1110 ops = ops->beneath;
1111 }
1112 while (ops != NULL);
1113
1114 return res;
1115 }
1116
1117 /* Perform a partial memory transfer.
1118 For docs see target.h, to_xfer_partial. */
1119
static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  /* Translate the unmapped address into its mapped equivalent
	     before reading from the section table.  */
	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  /* The address is in a read-only section; serving the read
	     from the executable file is as good as reading the
	     target, and much cheaper.  */
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  /* Shrink the request so it stops just before the
		     first available range.  */
		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  /* The dcache paths below require a current inferior.  */
  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1327
1328 /* Perform a partial memory transfer. For docs see target.h,
1329 to_xfer_partial. */
1330
1331 static enum target_xfer_status
1332 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1333 gdb_byte *readbuf, const gdb_byte *writebuf,
1334 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1335 {
1336 enum target_xfer_status res;
1337
1338 /* Zero length requests are ok and require no work. */
1339 if (len == 0)
1340 return TARGET_XFER_EOF;
1341
1342 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1343 breakpoint insns, thus hiding out from higher layers whether
1344 there are software breakpoints inserted in the code stream. */
1345 if (readbuf != NULL)
1346 {
1347 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1348 xfered_len);
1349
1350 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1351 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
1352 }
1353 else
1354 {
1355 void *buf;
1356 struct cleanup *old_chain;
1357
1358 /* A large write request is likely to be partially satisfied
1359 by memory_xfer_partial_1. We will continually malloc
1360 and free a copy of the entire write request for breakpoint
1361 shadow handling even though we only end up writing a small
1362 subset of it. Cap writes to 4KB to mitigate this. */
1363 len = min (4096, len);
1364
1365 buf = xmalloc (len);
1366 old_chain = make_cleanup (xfree, buf);
1367 memcpy (buf, writebuf, len);
1368
1369 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1370 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1371 xfered_len);
1372
1373 do_cleanups (old_chain);
1374 }
1375
1376 return res;
1377 }
1378
1379 static void
1380 restore_show_memory_breakpoints (void *arg)
1381 {
1382 show_memory_breakpoints = (uintptr_t) arg;
1383 }
1384
1385 struct cleanup *
1386 make_show_memory_breakpoints_cleanup (int show)
1387 {
1388 int current = show_memory_breakpoints;
1389
1390 show_memory_breakpoints = show;
1391 return make_cleanup (restore_show_memory_breakpoints,
1392 (void *) (uintptr_t) current);
1393 }
1394
1395 /* For docs see target.h, to_xfer_partial. */
1396
enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honor the user's "may-write-memory" permission setting.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  /* Dump the transferred bytes; a fresh line is started every
	     time the byte's host address is 16-aligned, and with
	     targetdebug < 2 the dump is truncated at the first such
	     boundary after the first byte.  */
	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1487
1488 /* Read LEN bytes of target memory at address MEMADDR, placing the
1489 results in GDB's memory at MYADDR. Returns either 0 for success or
1490 TARGET_XFER_E_IO if any error occurs.
1491
1492 If an error occurs, no guarantee is made about the contents of the data at
1493 MYADDR. In particular, the caller should not depend upon partial reads
1494 filling the buffer with good data. There is no way for the caller to know
1495 how much good data might have been transfered anyway. Callers that can
1496 deal with partial reads should call target_read (which will retry until
1497 it makes no progress, and then return how much was transferred). */
1498
1499 int
1500 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1501 {
1502 /* Dispatch to the topmost target, not the flattened current_target.
1503 Memory accesses check target->to_has_(all_)memory, and the
1504 flattened target doesn't inherit those. */
1505 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1506 myaddr, memaddr, len) == len)
1507 return 0;
1508 else
1509 return TARGET_XFER_E_IO;
1510 }
1511
1512 /* Like target_read_memory, but specify explicitly that this is a read
1513 from the target's raw memory. That is, this read bypasses the
1514 dcache, breakpoint shadowing, etc. */
1515
1516 int
1517 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1518 {
1519 /* See comment in target_read_memory about why the request starts at
1520 current_target.beneath. */
1521 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1522 myaddr, memaddr, len) == len)
1523 return 0;
1524 else
1525 return TARGET_XFER_E_IO;
1526 }
1527
1528 /* Like target_read_memory, but specify explicitly that this is a read from
1529 the target's stack. This may trigger different cache behavior. */
1530
1531 int
1532 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1533 {
1534 /* See comment in target_read_memory about why the request starts at
1535 current_target.beneath. */
1536 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1537 myaddr, memaddr, len) == len)
1538 return 0;
1539 else
1540 return TARGET_XFER_E_IO;
1541 }
1542
1543 /* Like target_read_memory, but specify explicitly that this is a read from
1544 the target's code. This may trigger different cache behavior. */
1545
1546 int
1547 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1548 {
1549 /* See comment in target_read_memory about why the request starts at
1550 current_target.beneath. */
1551 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1552 myaddr, memaddr, len) == len)
1553 return 0;
1554 else
1555 return TARGET_XFER_E_IO;
1556 }
1557
1558 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1559 Returns either 0 for success or TARGET_XFER_E_IO if any
1560 error occurs. If an error occurs, no guarantee is made about how
1561 much data got written. Callers that can deal with partial writes
1562 should call target_write. */
1563
1564 int
1565 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1566 {
1567 /* See comment in target_read_memory about why the request starts at
1568 current_target.beneath. */
1569 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1570 myaddr, memaddr, len) == len)
1571 return 0;
1572 else
1573 return TARGET_XFER_E_IO;
1574 }
1575
1576 /* Write LEN bytes from MYADDR to target raw memory at address
1577 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1578 if any error occurs. If an error occurs, no guarantee is made
1579 about how much data got written. Callers that can deal with
1580 partial writes should call target_write. */
1581
1582 int
1583 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1584 {
1585 /* See comment in target_read_memory about why the request starts at
1586 current_target.beneath. */
1587 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1588 myaddr, memaddr, len) == len)
1589 return 0;
1590 else
1591 return TARGET_XFER_E_IO;
1592 }
1593
1594 /* Fetch the target's memory map. */
1595
1596 VEC(mem_region_s) *
1597 target_memory_map (void)
1598 {
1599 VEC(mem_region_s) *result;
1600 struct mem_region *last_one, *this_one;
1601 int ix;
1602 struct target_ops *t;
1603
1604 if (targetdebug)
1605 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1606
1607 result = current_target.to_memory_map (&current_target);
1608 if (result == NULL)
1609 return NULL;
1610
1611 qsort (VEC_address (mem_region_s, result),
1612 VEC_length (mem_region_s, result),
1613 sizeof (struct mem_region), mem_region_cmp);
1614
1615 /* Check that regions do not overlap. Simultaneously assign
1616 a numbering for the "mem" commands to use to refer to
1617 each region. */
1618 last_one = NULL;
1619 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1620 {
1621 this_one->number = ix;
1622
1623 if (last_one && last_one->hi > this_one->lo)
1624 {
1625 warning (_("Overlapping regions in memory map: ignoring"));
1626 VEC_free (mem_region_s, result);
1627 return NULL;
1628 }
1629 last_one = this_one;
1630 }
1631
1632 return result;
1633 }
1634
1635 void
1636 target_flash_erase (ULONGEST address, LONGEST length)
1637 {
1638 if (targetdebug)
1639 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1640 hex_string (address), phex (length, 0));
1641 current_target.to_flash_erase (&current_target, address, length);
1642 }
1643
1644 void
1645 target_flash_done (void)
1646 {
1647 if (targetdebug)
1648 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1649 current_target.to_flash_done (&current_target);
1650 }
1651
/* "show" callback for the "trust-readonly-sections" setting; prints
   the current VALUE to FILE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1660
1661 /* More generic transfers. */
1662
1663 static enum target_xfer_status
1664 default_xfer_partial (struct target_ops *ops, enum target_object object,
1665 const char *annex, gdb_byte *readbuf,
1666 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1667 ULONGEST *xfered_len)
1668 {
1669 if (object == TARGET_OBJECT_MEMORY
1670 && ops->deprecated_xfer_memory != NULL)
1671 /* If available, fall back to the target's
1672 "deprecated_xfer_memory" method. */
1673 {
1674 int xfered = -1;
1675
1676 errno = 0;
1677 if (writebuf != NULL)
1678 {
1679 void *buffer = xmalloc (len);
1680 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1681
1682 memcpy (buffer, writebuf, len);
1683 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1684 1/*write*/, NULL, ops);
1685 do_cleanups (cleanup);
1686 }
1687 if (readbuf != NULL)
1688 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1689 0/*read*/, NULL, ops);
1690 if (xfered > 0)
1691 {
1692 *xfered_len = (ULONGEST) xfered;
1693 return TARGET_XFER_E_IO;
1694 }
1695 else if (xfered == 0 && errno == 0)
1696 /* "deprecated_xfer_memory" uses 0, cross checked against
1697 ERRNO as one indication of an error. */
1698 return TARGET_XFER_EOF;
1699 else
1700 return TARGET_XFER_E_IO;
1701 }
1702 else
1703 {
1704 gdb_assert (ops->beneath != NULL);
1705 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1706 readbuf, writebuf, offset, len,
1707 xfered_len);
1708 }
1709 }
1710
1711 /* Target vector read/write partial wrapper functions. */
1712
/* Read wrapper for target_xfer_partial: a transfer with no write
   buffer.  */

static enum target_xfer_status
target_read_partial (struct target_ops *ops,
		     enum target_object object,
		     const char *annex, gdb_byte *buf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
			      xfered_len);
}
1723
/* Write wrapper for target_xfer_partial: a transfer with no read
   buffer.  */

static enum target_xfer_status
target_write_partial (struct target_ops *ops,
		      enum target_object object,
		      const char *annex, const gdb_byte *buf,
		      ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
			      xfered_len);
}
1733
1734 /* Wrappers to perform the full transfer. */
1735
1736 /* For docs on target_read see target.h. */
1737
1738 LONGEST
1739 target_read (struct target_ops *ops,
1740 enum target_object object,
1741 const char *annex, gdb_byte *buf,
1742 ULONGEST offset, LONGEST len)
1743 {
1744 LONGEST xfered = 0;
1745
1746 while (xfered < len)
1747 {
1748 ULONGEST xfered_len;
1749 enum target_xfer_status status;
1750
1751 status = target_read_partial (ops, object, annex,
1752 (gdb_byte *) buf + xfered,
1753 offset + xfered, len - xfered,
1754 &xfered_len);
1755
1756 /* Call an observer, notifying them of the xfer progress? */
1757 if (status == TARGET_XFER_EOF)
1758 return xfered;
1759 else if (status == TARGET_XFER_OK)
1760 {
1761 xfered += xfered_len;
1762 QUIT;
1763 }
1764 else
1765 return -1;
1766
1767 }
1768 return len;
1769 }
1770
1771 /* Assuming that the entire [begin, end) range of memory cannot be
1772 read, try to read whatever subrange is possible to read.
1773
1774 The function returns, in RESULT, either zero or one memory block.
1775 If there's a readable subrange at the beginning, it is completely
1776 read and returned. Any further readable subrange will not be read.
1777 Otherwise, if there's a readable subrange at the end, it will be
1778 completely read and returned. Any readable subranges before it
1779 (obviously, not starting at the beginning), will be ignored. In
1780 other cases -- either no readable subrange, or readable subrange(s)
1781 that is neither at the beginning, or end, nothing is returned.
1782
1783 The purpose of this function is to handle a read across a boundary
1784 of accessible memory in a case when memory map is not available.
1785 The above restrictions are fine for this case, but will give
1786 incorrect results if the memory is 'patchy'. However, supporting
1787 'patchy' memory would require trying to read every single byte,
1788 and it seems unacceptable solution. Explicit memory map is
1789 recommended for this case -- and target_read_memory_robust will
1790 take care of reading multiple ranges then. */
1791
static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  /* Direction of the search: nonzero when the readable part is at the
     low end of the range.  */
  int forward;
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* The "first half" is the half adjacent to the already-read
	 part, i.e. the half to try reading next.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the
	     next iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  BUF is handed
	 off to the result; the caller owns it now.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy just that
	 tail into a right-sized buffer and release the scratch one.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
1905
1906 void
1907 free_memory_read_result_vector (void *x)
1908 {
1909 VEC(memory_read_result_s) *v = x;
1910 memory_read_result_s *current;
1911 int ix;
1912
1913 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
1914 {
1915 xfree (current->data);
1916 }
1917 VEC_free (memory_read_result_s, v);
1918 }
1919
/* Read LEN bytes starting at OFFSET, collecting whatever subranges can
   be read into a vector of memory_read_result_s.  Regions marked
   unreadable are skipped; failed chunk reads fall back to
   read_whatever_is_readable.  The caller owns the returned vector and
   each element's data buffer (see free_memory_read_result_vector).  */

VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      if (region->hi == 0)
	rlen = len - xfered;
      else
	rlen = region->hi - offset;
      /* NOTE(review): RLEN is computed from OFFSET rather than
	 OFFSET + XFERED, so once XFERED > 0 it overstates the span left
	 in the region; the min () below bounds the read by the request,
	 not by the region end -- confirm whether this is intended.  */

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
	{
	  /* Cannot read this region.  Note that we can end up here only
	     if the region is explicitly marked inaccessible, or
	     'inaccessible-by-default' is in effect.  */
	  xfered += rlen;
	}
      else
	{
	  LONGEST to_read = min (len - xfered, rlen);
	  gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

	  LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
				      (gdb_byte *) buffer,
				      offset + xfered, to_read);
	  /* Call an observer, notifying them of the xfer progress?  */
	  if (xfer <= 0)
	    {
	      /* Got an error reading full chunk.  See if maybe we can read
		 some subrange.  */
	      xfree (buffer);
	      read_whatever_is_readable (ops, offset + xfered,
					 offset + xfered + to_read, &result);
	      xfered += to_read;
	    }
	  else
	    {
	      /* Full or partial success; BUFFER ownership moves into
		 the result vector.  */
	      struct memory_read_result r;
	      r.data = buffer;
	      r.begin = offset + xfered;
	      r.end = r.begin + xfer;
	      VEC_safe_push (memory_read_result_s, result, &r);
	      xfered += xfer;
	    }
	  QUIT;
	}
    }
  return result;
}
1978
1979
1980 /* An alternative to target_write with progress callbacks. */
1981
1982 LONGEST
1983 target_write_with_progress (struct target_ops *ops,
1984 enum target_object object,
1985 const char *annex, const gdb_byte *buf,
1986 ULONGEST offset, LONGEST len,
1987 void (*progress) (ULONGEST, void *), void *baton)
1988 {
1989 LONGEST xfered = 0;
1990
1991 /* Give the progress callback a chance to set up. */
1992 if (progress)
1993 (*progress) (0, baton);
1994
1995 while (xfered < len)
1996 {
1997 ULONGEST xfered_len;
1998 enum target_xfer_status status;
1999
2000 status = target_write_partial (ops, object, annex,
2001 (gdb_byte *) buf + xfered,
2002 offset + xfered, len - xfered,
2003 &xfered_len);
2004
2005 if (status == TARGET_XFER_EOF)
2006 return xfered;
2007 if (TARGET_XFER_STATUS_ERROR_P (status))
2008 return -1;
2009
2010 gdb_assert (status == TARGET_XFER_OK);
2011 if (progress)
2012 (*progress) (xfered_len, baton);
2013
2014 xfered += xfered_len;
2015 QUIT;
2016 }
2017 return len;
2018 }
2019
2020 /* For docs on target_write see target.h. */
2021
LONGEST
target_write (struct target_ops *ops,
	      enum target_object object,
	      const char *annex, const gdb_byte *buf,
	      ULONGEST offset, LONGEST len)
{
  /* Same as target_write_with_progress, minus the progress callback.  */
  return target_write_with_progress (ops, object, annex, buf, offset, len,
				     NULL, NULL);
}
2031
2032 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2033 the size of the transferred data. PADDING additional bytes are
2034 available in *BUF_P. This is a helper function for
2035 target_read_alloc; see the declaration of that function for more
2036 information. */
2037
static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;

  /* This function does not have a length parameter; it reads the
     entire OBJECT).  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      /* PADDING bytes of slack are kept at the end of the buffer for
	 the caller (e.g. the NUL terminator target_read_stralloc
	 appends).  */
      status = target_read_partial (ops, object, annex, &buf[buf_pos],
				    buf_pos, buf_alloc - buf_pos - padding,
				    &xfered_len);

      if (status == TARGET_XFER_EOF)
	{
	  /* Read all there was.  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}
      else if (status != TARGET_XFER_OK)
	{
	  /* An error occurred.  */
	  xfree (buf);
	  return TARGET_XFER_E_IO;
	}

      buf_pos += xfered_len;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
2094
2095 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2096 the size of the transferred data. See the declaration in "target.h"
2097 function for more information about the return value. */
2098
LONGEST
target_read_alloc (struct target_ops *ops, enum target_object object,
		   const char *annex, gdb_byte **buf_p)
{
  /* No padding needed -- the caller gets exactly the transferred bytes.  */
  return target_read_alloc_1 (ops, object, annex, buf_p, 0);
}
2105
2106 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2107 returned as a string, allocated using xmalloc. If an error occurs
2108 or the transfer is unsupported, NULL is returned. Empty objects
2109 are returned as allocated but empty strings. A warning is issued
2110 if the result contains any embedded NUL bytes. */
2111
char *
target_read_stralloc (struct target_ops *ops, enum target_object object,
		      const char *annex)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Request one byte of padding so the NUL terminator below always
     fits.  */
  transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target object %d, annex %s, "
		   "contained unexpected null characters"),
		 (int) object, annex ? annex : "(none)");
	break;
      }

  return bufstr;
}
2143
/* Memory transfer methods.  */

/* Read LEN bytes at ADDR from target OPS into BUF.  On any shortfall
   this calls memory_error and so does not return.  */

void
get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
		   LONGEST len)
{
  /* This method is used to read from an alternate, non-current
     target.  This read must bypass the overlay support (as symbols
     don't match this target), and GDB's internal cache (wrong cache
     for this target).  */
  if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
      != len)
    memory_error (TARGET_XFER_E_IO, addr);
}
2158
/* Read a LEN-byte unsigned integer at ADDR from target OPS, decoding
   it with BYTE_ORDER.  LEN must not exceed sizeof (ULONGEST); memory
   errors are reported via get_target_memory (no return).  */

ULONGEST
get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
			    int len, enum bfd_endian byte_order)
{
  gdb_byte buf[sizeof (ULONGEST)];

  gdb_assert (len <= sizeof (buf));
  get_target_memory (ops, addr, buf, len);
  return extract_unsigned_integer (buf, len, byte_order);
}
2169
/* See target.h.  */

int
target_insert_breakpoint (struct gdbarch *gdbarch,
			  struct bp_target_info *bp_tgt)
{
  /* Honor the "may-insert-breakpoints" permission: warn and report
     failure (non-zero) without touching the target.  */
  if (!may_insert_breakpoints)
    {
      warning (_("May not insert breakpoints"));
      return 1;
    }

  return current_target.to_insert_breakpoint (&current_target,
					      gdbarch, bp_tgt);
}
2185
/* See target.h.  */

int
target_remove_breakpoint (struct gdbarch *gdbarch,
			  struct bp_target_info *bp_tgt)
{
  /* This is kind of a weird case to handle, but the permission might
     have been changed after breakpoints were inserted - in which case
     we should just take the user literally and assume that any
     breakpoints should be left in place.  */
  if (!may_insert_breakpoints)
    {
      warning (_("May not remove breakpoints"));
      return 1;
    }

  return current_target.to_remove_breakpoint (&current_target,
					      gdbarch, bp_tgt);
}
2205
/* Describe the current symbol file and each target on the stack that
   provides memory, from the top of the stack down.  ARGS and FROM_TTY
   are the usual command arguments (ARGS is unused here).  */

static void
target_info (char *args, int from_tty)
{
  struct target_ops *t;
  int has_all_mem = 0;

  if (symfile_objfile != NULL)
    printf_unfiltered (_("Symbols from \"%s\".\n"),
		       objfile_name (symfile_objfile));

  for (t = target_stack; t != NULL; t = t->beneath)
    {
      /* Only targets that contribute memory are interesting here.  */
      if (!(*t->to_has_memory) (t))
	continue;

      /* Skip the dummy stratum and anything below it.  */
      if ((int) (t->to_stratum) <= (int) dummy_stratum)
	continue;
      /* If a higher target claimed all memory, lower targets are only
	 consulted when that one is not running.  */
      if (has_all_mem)
	printf_unfiltered (_("\tWhile running this, "
			     "GDB does not access memory from...\n"));
      printf_unfiltered ("%s:\n", t->to_longname);
      (t->to_files_info) (t);
      has_all_mem = (*t->to_has_all_memory) (t);
    }
}
2231
/* This function is called before any new inferior is created, e.g.
   by running a program, attaching, or connecting to a target.
   It cleans up any state from previous invocations which might
   change between runs.  This is a subset of what target_preopen
   resets (things which might change between targets).  */

void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  In that case keep the cached state
     around for the next inferior.  */
  if (!gdbarch_has_global_solist (target_gdbarch ()))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* Agent capabilities must be re-probed against the new inferior.  */
  agent_capability_invalidate ();
}
2272
/* Callback for iterate_over_inferiors.  Gets rid of the given
   inferior (INF); ARGS is unused.  Always returns 0 so the iteration
   visits every inferior.  */

static int
dispose_inferior (struct inferior *inf, void *args)
{
  struct thread_info *thread;

  thread = any_thread_of_process (inf->pid);
  if (thread)
    {
      /* Make INF the current inferior so target_kill/target_detach
	 act on it.  */
      switch_to_thread (thread->ptid);

      /* Core inferiors actually should be detached, not killed.  */
      if (target_has_execution)
	target_kill ();
      else
	target_detach (NULL, 0);
    }

  return 0;
}
2295
/* This is to be called by the open routine before it does
   anything.  Disposes of any live inferiors (after querying the user
   when appropriate), pops stale targets, and resets per-run state.  */

void
target_preopen (int from_tty)
{
  dont_repeat ();

  if (have_inferiors ())
    {
      /* Only ask when interactive and something is actually alive;
	 otherwise dispose silently.  */
      if (!from_tty
	  || !have_live_inferiors ()
	  || query (_("A program is being debugged already.  Kill it? ")))
	iterate_over_inferiors (dispose_inferior, NULL);
      else
	error (_("Program not killed."));
    }

  /* Calling target_kill may remove the target from the stack.  But if
     it doesn't (which seems like a win for UDI), remove it now.  */
  /* Leave the exec target, though.  The user may be switching from a
     live process to a core of the same program.  */
  pop_all_targets_above (file_stratum);

  target_pre_inferior (from_tty);
}
2322
/* Detach a target after doing deferred register stores.  ARGS and
   FROM_TTY are forwarded to the target's to_detach method.  */

void
target_detach (const char *args, int from_tty)
{
  struct target_ops* t;

  if (gdbarch_has_global_breakpoints (target_gdbarch ()))
    /* Don't remove global breakpoints here.  They're removed on
       disconnection from the target.  */
    ;
  else
    /* If we're in breakpoints-always-inserted mode, have to remove
       them before detaching.  */
    remove_breakpoints_pid (ptid_get_pid (inferior_ptid));

  prepare_for_detach ();

  current_target.to_detach (&current_target, args, from_tty);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
			args, from_tty);
}
2346
/* Disconnect from the current target without resuming it, passing
   ARGS and FROM_TTY through to the target's to_disconnect method.  */

void
target_disconnect (char *args, int from_tty)
{
  /* If we're in breakpoints-always-inserted mode or if breakpoints
     are global across processes, we have to remove them before
     disconnecting.  */
  remove_breakpoints ();

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
			args, from_tty);
  current_target.to_disconnect (&current_target, args, from_tty);
}
2360
2361 ptid_t
2362 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2363 {
2364 struct target_ops *t;
2365 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2366 status, options);
2367
2368 if (targetdebug)
2369 {
2370 char *status_string;
2371 char *options_string;
2372
2373 status_string = target_waitstatus_to_string (status);
2374 options_string = target_options_to_string (options);
2375 fprintf_unfiltered (gdb_stdlog,
2376 "target_wait (%d, status, options={%s})"
2377 " = %d, %s\n",
2378 ptid_get_pid (ptid), options_string,
2379 ptid_get_pid (retval), status_string);
2380 xfree (status_string);
2381 xfree (options_string);
2382 }
2383
2384 return retval;
2385 }
2386
/* Return a string representation of PTID, as produced by the current
   target's to_pid_to_str method.  The result may point to static
   storage owned by the target.  */

char *
target_pid_to_str (ptid_t ptid)
{
  return (*current_target.to_pid_to_str) (&current_target, ptid);
}

/* Return the target-reported name of thread INFO, via the current
   target's to_thread_name method.  */

char *
target_thread_name (struct thread_info *info)
{
  return current_target.to_thread_name (&current_target, info);
}
2398
2399 void
2400 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2401 {
2402 struct target_ops *t;
2403
2404 target_dcache_invalidate ();
2405
2406 current_target.to_resume (&current_target, ptid, step, signal);
2407 if (targetdebug)
2408 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2409 ptid_get_pid (ptid),
2410 step ? "step" : "continue",
2411 gdb_signal_to_name (signal));
2412
2413 registers_changed_ptid (ptid);
2414 set_executing (ptid, 1);
2415 set_running (ptid, 1);
2416 clear_inline_frame_state (ptid);
2417 }
2418
/* Tell the target which signals to pass straight to the inferior.
   PASS_SIGNALS is an array of NUMSIGS flags indexed by gdb_signal
   number; a non-zero entry means "pass this signal through".  */

void
target_pass_signals (int numsigs, unsigned char *pass_signals)
{
  if (targetdebug)
    {
      int i;

      fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
			  numsigs);

      /* Log only the signals that are flagged.  */
      for (i = 0; i < numsigs; i++)
	if (pass_signals[i])
	  fprintf_unfiltered (gdb_stdlog, " %s",
			      gdb_signal_to_name (i));

      fprintf_unfiltered (gdb_stdlog, " })\n");
    }

  (*current_target.to_pass_signals) (&current_target, numsigs, pass_signals);
}
2439
/* Tell the target which signals the program itself may receive.
   PROGRAM_SIGNALS is an array of NUMSIGS flags indexed by gdb_signal
   number, analogous to target_pass_signals above.  */

void
target_program_signals (int numsigs, unsigned char *program_signals)
{
  if (targetdebug)
    {
      int i;

      fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
			  numsigs);

      /* Log only the signals that are flagged.  */
      for (i = 0; i < numsigs; i++)
	if (program_signals[i])
	  fprintf_unfiltered (gdb_stdlog, " %s",
			      gdb_signal_to_name (i));

      fprintf_unfiltered (gdb_stdlog, " })\n");
    }

  (*current_target.to_program_signals) (&current_target,
					numsigs, program_signals);
}
2461
/* Default implementation of to_follow_fork: reaching this means a
   fork event was reported but no target on the stack implements
   following it, which is an internal inconsistency.  Does not
   return.  */

static int
default_follow_fork (struct target_ops *self, int follow_child,
		     int detach_fork)
{
  /* Some target returned a fork event, but did not know how to follow it.  */
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to follow fork"));
}
2470
/* Look through the list of possible targets for a target that can
   follow forks.  FOLLOW_CHILD non-zero means follow the child rather
   than the parent; DETACH_FORK non-zero means detach from the process
   not followed.  Returns the target's result.  */

int
target_follow_fork (int follow_child, int detach_fork)
{
  int retval = current_target.to_follow_fork (&current_target,
					      follow_child, detach_fork);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog,
			"target_follow_fork (%d, %d) = %d\n",
			follow_child, detach_fork, retval);
  return retval;
}
2486
/* Default implementation of to_mourn_inferior: reaching this means no
   target on the stack knows how to mourn the inferior, which is an
   internal inconsistency.  Does not return.  */

static void
default_mourn_inferior (struct target_ops *self)
{
  /* Fixed message: it previously read "to follow mourn inferior", a
     copy-paste from default_follow_fork.  */
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to mourn inferior"));
}
2493
/* Mourn the current inferior: inform the target that the inferior is
   gone and release per-inferior resources.  */

void
target_mourn_inferior (void)
{
  current_target.to_mourn_inferior (&current_target);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");

  /* We no longer need to keep handles on any of the object files.
     Make sure to release them to avoid unnecessarily locking any
     of them while we're not actually debugging.  */
  bfd_cache_close_all ();
}
2506
/* Look for a target which can describe architectural features, starting
   from TARGET.  If we find one, return its description.  */

const struct target_desc *
target_read_description (struct target_ops *target)
{
  /* Delegation through to_read_description handles the search down
     the target stack.  */
  return target->to_read_description (target);
}
2515
/* This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search in on the
   target side with, for example, gdbserver).

   Searches SEARCH_SPACE_LEN bytes starting at START_ADDR via OPS for
   the PATTERN_LEN-byte PATTERN.  Returns 1 and sets *FOUND_ADDRP on a
   match, 0 if not found, -1 on a memory read error.  Memory is read
   in overlapping chunks so matches spanning chunk boundaries are
   found.  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  Sized one pattern
     length minus one beyond the chunk so boundary matches work.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  /* Translate the buffer offset back into a target address.  */
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2622
/* Default implementation of memory-searching.  Reads memory through
   the target stack (starting beneath the current target) and performs
   the search host-side via simple_search_memory.  */

static int
default_search_memory (struct target_ops *self,
		       CORE_ADDR start_addr, ULONGEST search_space_len,
		       const gdb_byte *pattern, ULONGEST pattern_len,
		       CORE_ADDR *found_addrp)
{
  /* Start over from the top of the target stack.  */
  return simple_search_memory (current_target.beneath,
			       start_addr, search_space_len,
			       pattern, pattern_len, found_addrp);
}
2636
/* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
   sequence of bytes in PATTERN with length PATTERN_LEN.

   The result is 1 if found, 0 if not found, and -1 if there was an error
   requiring halting of the search (e.g. memory read error).
   If the pattern is found the address is recorded in FOUND_ADDRP.  */

int
target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  int found;

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
			hex_string (start_addr));

  /* Delegate to the current target; the default method falls back to
     simple_search_memory above.  */
  found = current_target.to_search_memory (&current_target, start_addr,
					   search_space_len,
					   pattern, pattern_len, found_addrp);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "  = %d\n", found);

  return found;
}
2664
/* Look through the currently pushed targets.  If none of them will
   be able to restart the currently running process, issue an error
   message.  */

void
target_require_runnable (void)
{
  struct target_ops *t;

  for (t = target_stack; t != NULL; t = t->beneath)
    {
      /* If this target knows how to create a new program, then
	 assume we will still be able to after killing the current
	 one.  Either killing and mourning will not pop T, or else
	 find_default_run_target will find it again.  */
      if (t->to_create_inferior != NULL)
	return;

      /* Do not worry about thread_stratum targets that can not
	 create inferiors.  Assume they will be pushed again if
	 necessary, and continue to the process_stratum.  */
      if (t->to_stratum == thread_stratum
	  || t->to_stratum == arch_stratum)
	continue;

      error (_("The \"%s\" target does not support \"run\".  "
	       "Try \"help target\" or \"continue\"."),
	     t->to_shortname);
    }

  /* This function is only called if the target is running.  In that
     case there should have been a process_stratum target and it
     should either know how to create inferiors, or not...  */
  internal_error (__FILE__, __LINE__, _("No targets found"));
}
2700
2701 /* Look through the list of possible targets for a target that can
2702 execute a run or attach command without any other data. This is
2703 used to locate the default process stratum.
2704
2705 If DO_MESG is not NULL, the result is always valid (error() is
2706 called for errors); else, return NULL on error. */
2707
2708 static struct target_ops *
2709 find_default_run_target (char *do_mesg)
2710 {
2711 struct target_ops **t;
2712 struct target_ops *runable = NULL;
2713 int count;
2714
2715 count = 0;
2716
2717 for (t = target_structs; t < target_structs + target_struct_size;
2718 ++t)
2719 {
2720 if ((*t)->to_can_run != delegate_can_run && target_can_run (*t))
2721 {
2722 runable = *t;
2723 ++count;
2724 }
2725 }
2726
2727 if (count != 1)
2728 {
2729 if (do_mesg)
2730 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2731 else
2732 return NULL;
2733 }
2734
2735 return runable;
2736 }
2737
/* Default to_attach implementation: locate the unique runnable target
   (erroring out if there is none) and attach via it.  OPS is ignored;
   ARGS and FROM_TTY are forwarded.  */

void
find_default_attach (struct target_ops *ops, char *args, int from_tty)
{
  struct target_ops *t;

  t = find_default_run_target ("attach");
  (t->to_attach) (t, args, from_tty);
  return;
}

/* Default to_create_inferior implementation: locate the unique
   runnable target (erroring out if there is none) and start
   EXEC_FILE with arguments ALLARGS and environment ENV via it.  */

void
find_default_create_inferior (struct target_ops *ops,
			      char *exec_file, char *allargs, char **env,
			      int from_tty)
{
  struct target_ops *t;

  t = find_default_run_target ("run");
  (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
  return;
}
2759
/* Default to_can_async_p: ask the default run target, if any, whether
   it supports async execution; 0 when no such target exists.  */

static int
find_default_can_async_p (struct target_ops *ignore)
{
  struct target_ops *t;

  /* This may be called before the target is pushed on the stack;
     look for the default process stratum.  If there's none, gdb isn't
     configured with a native debugger, and target remote isn't
     connected yet.  */
  t = find_default_run_target (NULL);
  if (t && t->to_can_async_p != delegate_can_async_p)
    return (t->to_can_async_p) (t);
  return 0;
}

/* Default to_is_async_p: ask the default run target, if any, whether
   it is currently async; 0 when no such target exists.  */

static int
find_default_is_async_p (struct target_ops *ignore)
{
  struct target_ops *t;

  /* This may be called before the target is pushed on the stack;
     look for the default process stratum.  If there's none, gdb isn't
     configured with a native debugger, and target remote isn't
     connected yet.  */
  t = find_default_run_target (NULL);
  if (t && t->to_is_async_p != delegate_is_async_p)
    return (t->to_is_async_p) (t);
  return 0;
}

/* Default to_supports_non_stop: ask the default run target, if any;
   0 when no such target exists or it has no such method.  */

static int
find_default_supports_non_stop (struct target_ops *self)
{
  struct target_ops *t;

  t = find_default_run_target (NULL);
  if (t && t->to_supports_non_stop)
    return (t->to_supports_non_stop) (t);
  return 0;
}
2800
/* Return non-zero if some target on the stack supports non-stop mode;
   the first target providing to_supports_non_stop decides.  */

int
target_supports_non_stop (void)
{
  struct target_ops *t;

  for (t = &current_target; t != NULL; t = t->beneath)
    if (t->to_supports_non_stop)
      return t->to_supports_non_stop (t);

  return 0;
}
2812
/* Implement the "info proc" command.  ARGS and WHAT select the
   process and the information requested.  Returns 1 if some target
   handled the request, 0 if none could.  */

int
target_info_proc (char *args, enum info_proc_what what)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  if (current_target.to_stratum >= process_stratum)
    t = current_target.beneath;
  else
    t = find_default_run_target (NULL);

  /* Walk down the stack to the first target implementing the method.  */
  for (; t != NULL; t = t->beneath)
    {
      if (t->to_info_proc != NULL)
	{
	  t->to_info_proc (t, args, what);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_info_proc (\"%s\", %d)\n", args, what);

	  return 1;
	}
    }

  return 0;
}
2844
/* Default to_supports_disable_randomization: ask the default run
   target, if any; 0 when no such target exists or it lacks the
   method.  */

static int
find_default_supports_disable_randomization (struct target_ops *self)
{
  struct target_ops *t;

  t = find_default_run_target (NULL);
  if (t && t->to_supports_disable_randomization)
    return (t->to_supports_disable_randomization) (t);
  return 0;
}

/* Return non-zero if some target on the stack supports disabling
   address space randomization; the first target providing the method
   decides.  */

int
target_supports_disable_randomization (void)
{
  struct target_ops *t;

  for (t = &current_target; t != NULL; t = t->beneath)
    if (t->to_supports_disable_randomization)
      return t->to_supports_disable_randomization (t);

  return 0;
}
2867
/* Fetch OS data of kind TYPE as an xmalloc'd string, or NULL if no
   suitable target is available.  The caller owns the result.  */

char *
target_get_osdata (const char *type)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  if (current_target.to_stratum >= process_stratum)
    t = current_target.beneath;
  else
    t = find_default_run_target ("get OS data");

  if (!t)
    return NULL;

  return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
}
2886
/* Determine the current address space of thread PTID.  Asks each
   target beneath the current one; falls back to the inferior's main
   address space, and raises an internal error if even that is
   unavailable.  */

struct address_space *
target_thread_address_space (ptid_t ptid)
{
  struct address_space *aspace;
  struct inferior *inf;
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_address_space != NULL)
	{
	  aspace = t->to_thread_address_space (t, ptid);
	  /* A target implementing the method must produce a result.  */
	  gdb_assert (aspace);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_thread_address_space (%s) = %d\n",
				target_pid_to_str (ptid),
				address_space_num (aspace));
	  return aspace;
	}
    }

  /* Fall-back to the "main" address space of the inferior.  */
  inf = find_inferior_pid (ptid_get_pid (ptid));

  if (inf == NULL || inf->aspace == NULL)
    internal_error (__FILE__, __LINE__,
		    _("Can't determine the current "
		      "address space of thread %s\n"),
		    target_pid_to_str (ptid));

  return inf->aspace;
}
2923
2924
/* Target file operations.  */

/* Return the target to start the fileio method search from: the one
   beneath the current process target, or else the default run
   target (erroring out if none supports file I/O).  */

static struct target_ops *
default_fileio_target (void)
{
  /* If we're already connected to something that can perform
     file I/O, use it.  Otherwise, try using the native target.  */
  if (current_target.to_stratum >= process_stratum)
    return current_target.beneath;
  else
    return find_default_run_target ("file I/O");
}
2937
/* Open FILENAME on the target, using FLAGS and MODE.  Return a
   target file descriptor, or -1 if an error occurs (and set
   *TARGET_ERRNO).  The first target on the stack implementing
   to_fileio_open handles the request; FILEIO_ENOSYS if none does.  */
int
target_fileio_open (const char *filename, int flags, int mode,
		    int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_open != NULL)
	{
	  int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
				filename, flags, mode,
				fd, fd != -1 ? 0 : *target_errno);
	  return fd;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}
2965
/* Write up to LEN bytes from WRITE_BUF to FD on the target.
   Return the number of bytes written, or -1 if an error occurs
   (and set *TARGET_ERRNO).  The first target implementing
   to_fileio_pwrite handles it; FILEIO_ENOSYS if none does.  */
int
target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
		      ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pwrite != NULL)
	{
	  int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
					 target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pwrite (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Read up to LEN bytes FD on the target into READ_BUF.
   Return the number of bytes read, or -1 if an error occurs
   (and set *TARGET_ERRNO).  The first target implementing
   to_fileio_pread handles it; FILEIO_ENOSYS if none does.  */
int
target_fileio_pread (int fd, gdb_byte *read_buf, int len,
		     ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pread != NULL)
	{
	  int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
					target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pread (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3025
/* Close FD on the target.  Return 0, or -1 if an error occurs
   (and set *TARGET_ERRNO).  The first target implementing
   to_fileio_close handles it; FILEIO_ENOSYS if none does.  */
int
target_fileio_close (int fd, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_close != NULL)
	{
	  int ret = t->to_fileio_close (t, fd, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_close (%d) = %d (%d)\n",
				fd, ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Unlink FILENAME on the target.  Return 0, or -1 if an error
   occurs (and set *TARGET_ERRNO).  The first target implementing
   to_fileio_unlink handles it; FILEIO_ENOSYS if none does.  */
int
target_fileio_unlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_unlink != NULL)
	{
	  int ret = t->to_fileio_unlink (t, filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_unlink (%s) = %d (%d)\n",
				filename, ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3075
/* Read value of symbolic link FILENAME on the target.  Return a
   null-terminated string allocated via xmalloc, or NULL if an error
   occurs (and set *TARGET_ERRNO).  The first target implementing
   to_fileio_readlink handles it; FILEIO_ENOSYS if none does.  */
char *
target_fileio_readlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_readlink != NULL)
	{
	  char *ret = t->to_fileio_readlink (t, filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_readlink (%s) = %s (%d)\n",
				filename, ret? ret : "(nil)",
				ret? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return NULL;
}

/* Cleanup callback: close the target fd pointed to by OPAQUE,
   discarding any close error.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int fd = *(int *) opaque;
  int target_errno;

  target_fileio_close (fd, &target_errno);
}
3111
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.  Returns -1 on any error; on success of zero length
   the buffer is freed and *BUF_P is left unset.  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Guarantee the fd is closed on every exit path.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Keep PADDING bytes in reserve at the end of the buffer.  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
3175
/* Read target file FILENAME.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.  */

LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  /* Padding of 0: the caller gets exactly the file contents.  */
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}
3185
3186 /* Read target file FILENAME. The result is NUL-terminated and
3187 returned as a string, allocated using xmalloc. If an error occurs
3188 or the transfer is unsupported, NULL is returned. Empty objects
3189 are returned as allocated but empty strings. A warning is issued
3190 if the result contains any embedded NUL bytes. */
3191
3192 char *
3193 target_fileio_read_stralloc (const char *filename)
3194 {
3195 gdb_byte *buffer;
3196 char *bufstr;
3197 LONGEST i, transferred;
3198
3199 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3200 bufstr = (char *) buffer;
3201
3202 if (transferred < 0)
3203 return NULL;
3204
3205 if (transferred == 0)
3206 return xstrdup ("");
3207
3208 bufstr[transferred] = 0;
3209
3210 /* Check for embedded NUL bytes; but allow trailing NULs. */
3211 for (i = strlen (bufstr); i < transferred; i++)
3212 if (bufstr[i] != 0)
3213 {
3214 warning (_("target file %s "
3215 "contained unexpected null characters"),
3216 filename);
3217 break;
3218 }
3219
3220 return bufstr;
3221 }
3222
3223
/* Default to_region_ok_for_hw_watchpoint: accept a region of LEN
   bytes at ADDR only if it is no wider than a pointer on the current
   architecture.  */

static int
default_region_ok_for_hw_watchpoint (struct target_ops *self,
				     CORE_ADDR addr, int len)
{
  return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
}
3230
3231 static int
3232 default_watchpoint_addr_within_range (struct target_ops *target,
3233 CORE_ADDR addr,
3234 CORE_ADDR start, int length)
3235 {
3236 return addr >= start && addr < start + length;
3237 }
3238
/* Default to_thread_architecture: every thread shares the global
   target architecture.  */

static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch ();
}

/* Generic predicate stub that always answers "no".  */

static int
return_zero (struct target_ops *ignore)
{
  return 0;
}

/* As return_zero, for methods that also take a ptid argument.  */

static int
return_zero_has_execution (struct target_ops *ignore, ptid_t ignore2)
{
  return 0;
}
3256
/*
 * Find the next target down the stack from the specified target.
 */

struct target_ops *
find_target_beneath (struct target_ops *t)
{
  return t->beneath;
}
3266
3267 /* See target.h. */
3268
3269 struct target_ops *
3270 find_target_at (enum strata stratum)
3271 {
3272 struct target_ops *t;
3273
3274 for (t = current_target.beneath; t != NULL; t = t->beneath)
3275 if (t->to_stratum == stratum)
3276 return t;
3277
3278 return NULL;
3279 }
3280
3281 \f
/* The inferior process has died.  Long live the inferior!  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Save and clear inferior_ptid first, so that nothing below acts on
     the dead process.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  /* Give interested parties (deprecated hook API) a chance to react.  */
  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3316 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer, so the result is only valid until the next call and
   this function is not reentrant.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}
3328
/* Default implementation of to_pid_to_str: ignore OPS and use the
   generic "process NNN" formatting.  */

static char *
default_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3334
/* Error-catcher for target_find_memory_regions.  Always throws; the
   return statement only exists to satisfy the signature.  */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  return 0;
}
3343
/* Error-catcher for target_make_corefile_notes.  Always throws; the
   return statement only exists to satisfy the signature.  */
static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  return NULL;
}
3352
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  The remaining methods are filled in by
   install_dummy_methods.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  /* "run" and friends still need to work with no target pushed.  */
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_stratum = dummy_stratum;
  /* The dummy target has nothing: no memory, stack, registers, or
     execution.  */
  dummy_target.to_has_all_memory = return_zero;
  dummy_target.to_has_memory = return_zero;
  dummy_target.to_has_stack = return_zero;
  dummy_target.to_has_registers = return_zero;
  dummy_target.to_has_execution = return_zero_has_execution;
  dummy_target.to_magic = OPS_MAGIC;

  install_dummy_methods (&dummy_target);
}
3376 \f
/* Debug wrapper for to_open: call the real method saved in
   debug_target, then log the call to gdb_stdlog.  */
static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}
3384
/* Close TARG, which must already have been unpushed.  Prefer the
   to_xclose variant (which also deallocates TARG) when available.  */
void
target_close (struct target_ops *targ)
{
  gdb_assert (!target_is_pushed (targ));

  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3398
/* Attach to a process; delegates to the current target vector's
   to_attach method, logging the call when target debugging is on.  */
void
target_attach (char *args, int from_tty)
{
  current_target.to_attach (&current_target, args, from_tty);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
			args, from_tty);
}
3407
3408 int
3409 target_thread_alive (ptid_t ptid)
3410 {
3411 int retval;
3412
3413 retval = current_target.to_thread_alive (&current_target, ptid);
3414 if (targetdebug)
3415 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3416 ptid_get_pid (ptid), retval);
3417
3418 return retval;
3419 }
3420
/* Ask the current target to discover any threads GDB does not yet
   know about.  */
void
target_find_new_threads (void)
{
  current_target.to_find_new_threads (&current_target);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
}
3428
/* Stop thread(s) matching PTID, honoring the user-settable "may-stop"
   permission: when stopping is forbidden, warn and do nothing.  */
void
target_stop (ptid_t ptid)
{
  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  (*current_target.to_stop) (&current_target, ptid);
}
3440
/* Debug wrapper for to_post_attach: call the real method saved in
   debug_target, then log the call to gdb_stdlog.  */
static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3448
3449 /* Concatenate ELEM to LIST, a comma separate list, and return the
3450 result. The LIST incoming argument is released. */
3451
3452 static char *
3453 str_comma_list_concat_elem (char *list, const char *elem)
3454 {
3455 if (list == NULL)
3456 return xstrdup (elem);
3457 else
3458 return reconcat (list, list, ", ", elem, (char *) NULL);
3459 }
3460
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET
   and clear OPT from TARGET_OPTIONS.  Returns the (possibly
   reallocated) resulting string.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  int opt_is_set = (*target_options & opt) != 0;

  if (opt_is_set)
    {
      *target_options &= ~opt;
      ret = str_comma_list_concat_elem (ret, opt_str);
    }

  return ret;
}
3478
3479 char *
3480 target_options_to_string (int target_options)
3481 {
3482 char *ret = NULL;
3483
3484 #define DO_TARG_OPTION(OPT) \
3485 ret = do_option (&target_options, ret, OPT, #OPT)
3486
3487 DO_TARG_OPTION (TARGET_WNOHANG);
3488
3489 if (target_options != 0)
3490 ret = str_comma_list_concat_elem (ret, "unknown???");
3491
3492 if (ret == NULL)
3493 ret = xstrdup ("");
3494 return ret;
3495 }
3496
/* Log, to gdb_stdlog, a register access performed by FUNC on REGNO of
   REGCACHE: the register's name (or number), its raw bytes, and --
   when it fits in a LONGEST -- its value as address and integer.  */
static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the symbolic name when REGNO is a named raw register.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      /* Raw bytes, in target memory order.  */
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3533
/* Fetch REGNO (or all registers if REGNO is -1 -- assumption based on
   regcache conventions, confirm in target.h) into REGCACHE via the
   current target.  */
void
target_fetch_registers (struct regcache *regcache, int regno)
{
  current_target.to_fetch_registers (&current_target, regcache, regno);
  if (targetdebug)
    debug_print_register ("target_fetch_registers", regcache, regno);
}
3541
3542 void
3543 target_store_registers (struct regcache *regcache, int regno)
3544 {
3545 struct target_ops *t;
3546
3547 if (!may_write_registers)
3548 error (_("Writing to registers is not allowed (regno %d)"), regno);
3549
3550 current_target.to_store_registers (&current_target, regcache, regno);
3551 if (targetdebug)
3552 {
3553 debug_print_register ("target_store_registers", regcache, regno);
3554 }
3555 }
3556
3557 int
3558 target_core_of_thread (ptid_t ptid)
3559 {
3560 int retval = current_target.to_core_of_thread (&current_target, ptid);
3561
3562 if (targetdebug)
3563 fprintf_unfiltered (gdb_stdlog,
3564 "target_core_of_thread (%d) = %d\n",
3565 ptid_get_pid (ptid), retval);
3566 return retval;
3567 }
3568
/* Compare SIZE bytes at MEMADDR in target memory against DATA via the
   current target's to_verify_memory method; returns its result.  */
int
target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  int retval = current_target.to_verify_memory (&current_target,
						data, memaddr, size);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog,
			"target_verify_memory (%s, %s) = %d\n",
			paddress (target_gdbarch (), memaddr),
			pulongest (size),
			retval);
  return retval;
}
3583
/* The documentation for this function is in its prototype declaration in
   target.h.  Delegates to to_insert_mask_watchpoint and logs the
   result when target debugging is enabled.  */

int
target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  int ret;

  ret = current_target.to_insert_mask_watchpoint (&current_target,
						  addr, mask, rw);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "\
target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
			core_addr_to_string (addr),
			core_addr_to_string (mask), rw, ret);

  return ret;
}
3603
/* The documentation for this function is in its prototype declaration in
   target.h.  Delegates to to_remove_mask_watchpoint and logs the
   result when target debugging is enabled.  */

int
target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  int ret;

  ret = current_target.to_remove_mask_watchpoint (&current_target,
						  addr, mask, rw);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "\
target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
			core_addr_to_string (addr),
			core_addr_to_string (mask), rw, ret);

  return ret;
}
3623
/* The documentation for this function is in its prototype declaration
   in target.h.  Straight delegation, no debug logging.  */

int
target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
{
  return current_target.to_masked_watch_num_registers (&current_target,
						       addr, mask);
}
3633
/* The documentation for this function is in its prototype declaration
   in target.h.  Straight delegation, no debug logging.  */

int
target_ranged_break_num_registers (void)
{
  return current_target.to_ranged_break_num_registers (&current_target);
}
3642
/* See target.h.  Enable branch tracing for PTID via the current
   target vector.  */

struct btrace_target_info *
target_enable_btrace (ptid_t ptid)
{
  return current_target.to_enable_btrace (&current_target, ptid);
}
3650
/* See target.h.  Disable branch tracing for BTINFO via the current
   target vector.  */

void
target_disable_btrace (struct btrace_target_info *btinfo)
{
  current_target.to_disable_btrace (&current_target, btinfo);
}
3658
/* See target.h.  Tear down branch tracing state for BTINFO via the
   current target vector.  */

void
target_teardown_btrace (struct btrace_target_info *btinfo)
{
  current_target.to_teardown_btrace (&current_target, btinfo);
}
3666
/* See target.h.  Read branch trace data for BTINFO into *BTRACE
   according to TYPE, via the current target vector.  */

enum btrace_error
target_read_btrace (VEC (btrace_block_s) **btrace,
		    struct btrace_target_info *btinfo,
		    enum btrace_read_type type)
{
  return current_target.to_read_btrace (&current_target, btrace, btinfo, type);
}
3676
/* See target.h.  Stop any execution recording in progress via the
   current target vector.  */

void
target_stop_recording (void)
{
  current_target.to_stop_recording (&current_target);
}
3684
3685 /* See target.h. */
3686
3687 void
3688 target_info_record (void)
3689 {
3690 struct target_ops *t;
3691
3692 for (t = current_target.beneath; t != NULL; t = t->beneath)
3693 if (t->to_info_record != NULL)
3694 {
3695 t->to_info_record (t);
3696 return;
3697 }
3698
3699 tcomplain ();
3700 }
3701
/* See target.h.  Save recorded execution to FILENAME via the current
   target vector.  */

void
target_save_record (const char *filename)
{
  current_target.to_save_record (&current_target, filename);
}
3709
3710 /* See target.h. */
3711
3712 int
3713 target_supports_delete_record (void)
3714 {
3715 struct target_ops *t;
3716
3717 for (t = current_target.beneath; t != NULL; t = t->beneath)
3718 if (t->to_delete_record != NULL)
3719 return 1;
3720
3721 return 0;
3722 }
3723
/* See target.h.  Delete the recorded execution trace via the current
   target vector.  */

void
target_delete_record (void)
{
  current_target.to_delete_record (&current_target);
}
3731
/* See target.h.  Return whether the record target is currently
   replaying, via the current target vector.  */

int
target_record_is_replaying (void)
{
  return current_target.to_record_is_replaying (&current_target);
}
3739
/* See target.h.  Go to the beginning of the execution record, via the
   current target vector.  */

void
target_goto_record_begin (void)
{
  current_target.to_goto_record_begin (&current_target);
}
3747
/* See target.h.  Go to the end of the execution record, via the
   current target vector.  */

void
target_goto_record_end (void)
{
  current_target.to_goto_record_end (&current_target);
}
3755
/* See target.h.  Go to instruction number INSN in the execution
   record, via the current target vector.  */

void
target_goto_record (ULONGEST insn)
{
  current_target.to_goto_record (&current_target, insn);
}
3763
/* See target.h.  Print SIZE instructions of history with FLAGS, via
   the current target vector.  */

void
target_insn_history (int size, int flags)
{
  current_target.to_insn_history (&current_target, size, flags);
}
3771
/* See target.h.  Print SIZE instructions of history starting at FROM
   with FLAGS, via the current target vector.  */

void
target_insn_history_from (ULONGEST from, int size, int flags)
{
  current_target.to_insn_history_from (&current_target, from, size, flags);
}
3779
/* See target.h.  Print instruction history between BEGIN and END with
   FLAGS, via the current target vector.  */

void
target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  current_target.to_insn_history_range (&current_target, begin, end, flags);
}
3787
/* See target.h.  Print SIZE functions of call history with FLAGS, via
   the current target vector.  */

void
target_call_history (int size, int flags)
{
  current_target.to_call_history (&current_target, size, flags);
}
3795
/* See target.h.  Print SIZE functions of call history starting at
   BEGIN with FLAGS, via the current target vector.  */

void
target_call_history_from (ULONGEST begin, int size, int flags)
{
  current_target.to_call_history_from (&current_target, begin, size, flags);
}
3803
/* See target.h.  Print call history between BEGIN and END with FLAGS,
   via the current target vector.  */

void
target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  current_target.to_call_history_range (&current_target, begin, end, flags);
}
3811
/* Debug wrapper for to_prepare_to_store: call the real method saved
   in debug_target, then log the call to gdb_stdlog.  */
static void
debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
{
  debug_target.to_prepare_to_store (&debug_target, regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
3819
/* See target.h.  Return the current target's custom frame unwinder,
   if any.  */

const struct frame_unwind *
target_get_unwinder (void)
{
  return current_target.to_get_unwinder (&current_target);
}
3827
/* See target.h.  Return the current target's custom tail-call frame
   unwinder, if any.  */

const struct frame_unwind *
target_get_tailcall_unwinder (void)
{
  return current_target.to_get_tailcall_unwinder (&current_target);
}
3835
/* Default implementation of to_decr_pc_after_break: defer to the
   architecture's value.  */

static CORE_ADDR
default_target_decr_pc_after_break (struct target_ops *ops,
				    struct gdbarch *gdbarch)
{
  return gdbarch_decr_pc_after_break (gdbarch);
}
3844
/* See target.h.  Amount the PC must be decremented after a breakpoint
   hit, per the current target.  */

CORE_ADDR
target_decr_pc_after_break (struct gdbarch *gdbarch)
{
  return current_target.to_decr_pc_after_break (&current_target, gdbarch);
}
3852
/* Debug wrapper for the deprecated_xfer_memory method: perform the
   transfer through the saved debug_target, then log the request and,
   on success, a hex dump of the bytes moved.  With targetdebug < 2
   the dump is truncated after the first 16-byte row.  */
static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Start a new output row at each 16-byte-aligned buffer
	     address (alignment of the host buffer, not the target
	     address).  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
3893
/* Debug wrapper for to_files_info: call the real method, then log the
   call to gdb_stdlog.  Note this passes TARGET through rather than
   &debug_target, matching the method's expectations.  */
static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
3901
/* Debug wrapper for to_insert_breakpoint: insert via the saved
   debug_target, then log the placed address and result.  */
static int
debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
3916
/* Debug wrapper for to_remove_breakpoint: remove via the saved
   debug_target, then log the placed address and result.  */
static int
debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
3931
3932 static int
3933 debug_to_can_use_hw_breakpoint (struct target_ops *self,
3934 int type, int cnt, int from_tty)
3935 {
3936 int retval;
3937
3938 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
3939 type, cnt, from_tty);
3940
3941 fprintf_unfiltered (gdb_stdlog,
3942 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
3943 (unsigned long) type,
3944 (unsigned long) cnt,
3945 (unsigned long) from_tty,
3946 (unsigned long) retval);
3947 return retval;
3948 }
3949
3950 static int
3951 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
3952 CORE_ADDR addr, int len)
3953 {
3954 CORE_ADDR retval;
3955
3956 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
3957 addr, len);
3958
3959 fprintf_unfiltered (gdb_stdlog,
3960 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
3961 core_addr_to_string (addr), (unsigned long) len,
3962 core_addr_to_string (retval));
3963 return retval;
3964 }
3965
/* Debug wrapper for to_can_accel_watchpoint_condition: query the
   saved debug_target, then log the arguments and result.  */
static int
debug_to_can_accel_watchpoint_condition (struct target_ops *self,
					 CORE_ADDR addr, int len, int rw,
					 struct expression *cond)
{
  int retval;

  retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
							   addr, len,
							   rw, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_can_accel_watchpoint_condition "
		      "(%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, rw,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
3984
/* Debug wrapper for to_stopped_by_watchpoint: query the saved
   debug_target, then log the result.  */
static int
debug_to_stopped_by_watchpoint (struct target_ops *ops)
{
  int retval;

  retval = debug_target.to_stopped_by_watchpoint (&debug_target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_by_watchpoint () = %ld\n",
		      (unsigned long) retval);
  return retval;
}
3997
/* Debug wrapper for to_stopped_data_address: query the real method
   (through TARGET, not debug_target), then log *ADDR and the result.
   NOTE(review): *ADDR is printed even when the method returned 0, in
   which case it presumably was not written -- confirm against the
   method's contract in target.h.  */
static int
debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
{
  int retval;

  retval = debug_target.to_stopped_data_address (target, addr);

  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_data_address ([%s]) = %ld\n",
		      core_addr_to_string (*addr),
		      (unsigned long)retval);
  return retval;
}
4011
4012 static int
4013 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4014 CORE_ADDR addr,
4015 CORE_ADDR start, int length)
4016 {
4017 int retval;
4018
4019 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4020 start, length);
4021
4022 fprintf_filtered (gdb_stdlog,
4023 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4024 core_addr_to_string (addr), core_addr_to_string (start),
4025 length, retval);
4026 return retval;
4027 }
4028
/* Debug wrapper for to_insert_hw_breakpoint: insert via the saved
   debug_target, then log the placed address and result.  */
static int
debug_to_insert_hw_breakpoint (struct target_ops *self,
			       struct gdbarch *gdbarch,
			       struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_hw_breakpoint (&debug_target,
						 gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4045
/* Debug wrapper for to_remove_hw_breakpoint: remove via the saved
   debug_target, then log the placed address and result.  */
static int
debug_to_remove_hw_breakpoint (struct target_ops *self,
			       struct gdbarch *gdbarch,
			       struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_hw_breakpoint (&debug_target,
						 gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4062
/* Debug wrapper for to_insert_watchpoint: insert via the saved
   debug_target, then log the arguments and result.  */
static int
debug_to_insert_watchpoint (struct target_ops *self,
			    CORE_ADDR addr, int len, int type,
			    struct expression *cond)
{
  int retval;

  retval = debug_target.to_insert_watchpoint (&debug_target,
					      addr, len, type, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, type,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4079
/* Debug wrapper for to_remove_watchpoint: remove via the saved
   debug_target, then log the arguments and result.  */
static int
debug_to_remove_watchpoint (struct target_ops *self,
			    CORE_ADDR addr, int len, int type,
			    struct expression *cond)
{
  int retval;

  retval = debug_target.to_remove_watchpoint (&debug_target,
					      addr, len, type, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, type,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4096
/* Debug wrapper for to_terminal_init: call the real method, then log
   the call.  */
static void
debug_to_terminal_init (struct target_ops *self)
{
  debug_target.to_terminal_init (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
4104
/* Debug wrapper for to_terminal_inferior: call the real method, then
   log the call.  */
static void
debug_to_terminal_inferior (struct target_ops *self)
{
  debug_target.to_terminal_inferior (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
4112
/* Debug wrapper for to_terminal_ours_for_output: call the real
   method, then log the call.  */
static void
debug_to_terminal_ours_for_output (struct target_ops *self)
{
  debug_target.to_terminal_ours_for_output (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}
4120
/* Debug wrapper for to_terminal_ours: call the real method, then log
   the call.  */
static void
debug_to_terminal_ours (struct target_ops *self)
{
  debug_target.to_terminal_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}
4128
/* Debug wrapper for to_terminal_save_ours: call the real method, then
   log the call.  */
static void
debug_to_terminal_save_ours (struct target_ops *self)
{
  debug_target.to_terminal_save_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}
4136
/* Debug wrapper for to_terminal_info: call the real method, then log
   the arguments.  */
static void
debug_to_terminal_info (struct target_ops *self,
			const char *arg, int from_tty)
{
  debug_target.to_terminal_info (&debug_target, arg, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
		      from_tty);
}
4146
/* Debug wrapper for to_load: call the real method, then log the
   arguments.  */
static void
debug_to_load (struct target_ops *self, char *args, int from_tty)
{
  debug_target.to_load (&debug_target, args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
4154
/* Debug wrapper for to_post_startup_inferior: call the real method,
   then log the pid.  */
static void
debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_post_startup_inferior (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      ptid_get_pid (ptid));
}
4163
/* Debug wrapper for to_insert_fork_catchpoint: call the real method,
   then log the pid and result.  */
static int
debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4176
/* Debug wrapper for to_remove_fork_catchpoint: call the real method,
   then log the pid and result.  */
static int
debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4189
/* Debug wrapper for to_insert_vfork_catchpoint: call the real method,
   then log the pid and result.  */
static int
debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4202
/* Debug wrapper for to_remove_vfork_catchpoint: call the real method,
   then log the pid and result.  */
static int
debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4215
/* Debug wrapper for to_insert_exec_catchpoint: call the real method,
   then log the pid and result.  */
static int
debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4228
/* Debug wrapper for to_remove_exec_catchpoint: call the real method,
   then log the pid and result.  */
static int
debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4241
/* Debug wrapper for to_has_exited: call the real method, then log the
   arguments and result.  NOTE(review): *EXIT_STATUS is printed even
   when HAS_EXITED is false, in which case the method presumably did
   not set it -- confirm against the method's contract in target.h.  */
static int
debug_to_has_exited (struct target_ops *self,
		     int pid, int wait_status, int *exit_status)
{
  int has_exited;

  has_exited = debug_target.to_has_exited (&debug_target,
					   pid, wait_status, exit_status);

  fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
		      pid, wait_status, *exit_status, has_exited);

  return has_exited;
}
4256
/* Debug wrapper for to_can_run: call the real method, then log the
   result.  */
static int
debug_to_can_run (struct target_ops *self)
{
  int retval;

  retval = debug_target.to_can_run (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);

  return retval;
}
4268
/* Debug wrapper for to_thread_architecture: call the real method,
   then log the ptid and resulting architecture.  NOTE(review): unlike
   the sibling wrappers this passes OPS through instead of
   &debug_target -- presumably intentional, confirm.  */
static struct gdbarch *
debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  struct gdbarch *retval;

  retval = debug_target.to_thread_architecture (ops, ptid);

  fprintf_unfiltered (gdb_stdlog,
		      "target_thread_architecture (%s) = %s [%s]\n",
		      target_pid_to_str (ptid),
		      host_address_to_string (retval),
		      gdbarch_bfd_arch_info (retval)->printable_name);
  return retval;
}
4283
/* Debug wrapper for to_stop: call the real method, then log the
   ptid.  */
static void
debug_to_stop (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_stop (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
		      target_pid_to_str (ptid));
}
4292
/* Debug wrapper for to_rcmd: call the real method, then log the
   command string.  */
static void
debug_to_rcmd (struct target_ops *self, char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (&debug_target, command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
4300
/* Debug wrapper for to_pid_to_exec_file: call the real method, then
   log the pid and result.  NOTE(review): the result is formatted with
   %s even though it may be NULL -- relies on the host printf
   tolerating NULL; confirm.  */
static char *
debug_to_pid_to_exec_file (struct target_ops *self, int pid)
{
  char *exec_file;

  exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
		      pid, exec_file);

  return exec_file;
}
4313
/* Install the debug_to_* logging wrappers: snapshot the current
   target vector into debug_target (the wrappers forward to it), then
   point the interesting slots of current_target at the wrappers.  */
static void
setup_target_debug (void)
{
  /* The snapshot must happen before any slot is overwritten.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4361 \f
4362
/* Help text for the "info target" / "info files" commands.  */
static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4367
/* Default implementation of to_rcmd: the target has no monitor, so
   "monitor" commands are an error.  */
static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
4373
/* Implementation of the "monitor" command: forward CMD to the target,
   directing output to the target's stream.  */
static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  target_rcmd (cmd, gdb_stdtarg);
}
4380
4381 /* Print the name of each layers of our target stack. */
4382
4383 static void
4384 maintenance_print_target_stack (char *cmd, int from_tty)
4385 {
4386 struct target_ops *t;
4387
4388 printf_filtered (_("The current target stack is:\n"));
4389
4390 for (t = target_stack; t != NULL; t = t->beneath)
4391 {
4392 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4393 }
4394 }
4395
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated; see
   set_target_async_command below.  */
static int target_async_permitted_1 = 0;
4402
/* "set target-async" handler: copy the user-set staging variable into
   target_async_permitted, but refuse (and roll back the staging
   variable) while there are live inferiors.  */
static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Undo the user's edit before erroring out.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
4415
/* Implement the "show target-async" command.  VALUE is the printable
   form of the current setting, supplied by the show machinery.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"),
		    value);
}
4425
/* Temporary copies of permission settings.  The "set may-*" commands
   registered in initialize_targets write into these; the real may_*
   flags are only updated from them by set_target_permissions and
   set_write_memory_permission below, once the change is known to be
   allowed.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4434
/* Make the user-set values match the real values again.  Used to
   back out an edit of the *_1 copies, e.g. when a "set may-*"
   command must be refused (see set_target_permissions).  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
4447
/* The one function handles (most of) the permission flags in the same
   way.  Shared by the "set may-*" commands, except "may-write-memory"
   (see set_write_memory_permission below).  Refuses any change while
   the inferior is executing.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Undo the user's edit of the *_1 copies before bailing out;
	 error does not return.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  Note that
     may_write_memory is deliberately absent here -- it is committed
     by set_write_memory_permission instead, independently of
     observer mode.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
4469
/* Set memory write permission independently of observer mode.
   Unlike set_target_permissions, this intentionally does not refuse
   the change while the inferior is executing.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
4480
4481
/* Install the dummy target and register all target-related commands
   and settings.  Called once during GDB startup.  */

void
initialize_targets (void)
{
  /* Make sure the target stack is never empty: the dummy target sits
     at the bottom and fields any request no other layer handles.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info files" is an alias for "info target".  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled.  Higher numbers are more\n\
verbose.  Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target.  This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* The "set may-*" permission commands below all share
     set_target_permissions, except "may-write-memory", whose setter
     must work independently of observer mode (see
     set_write_memory_permission).  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}