1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2021 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "target.h"
24 #include "target-dcache.h"
25 #include "gdbcmd.h"
26 #include "symtab.h"
27 #include "inferior.h"
28 #include "infrun.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "dcache.h"
33 #include <signal.h>
34 #include "regcache.h"
35 #include "gdbcore.h"
36 #include "target-descriptions.h"
37 #include "gdbthread.h"
38 #include "solib.h"
39 #include "exec.h"
40 #include "inline-frame.h"
41 #include "tracepoint.h"
42 #include "gdb/fileio.h"
43 #include "gdbsupport/agent.h"
44 #include "auxv.h"
45 #include "target-debug.h"
46 #include "top.h"
47 #include "event-top.h"
48 #include <algorithm>
49 #include "gdbsupport/byte-vector.h"
50 #include "gdbsupport/search.h"
51 #include "terminal.h"
52 #include <unordered_map>
53 #include "target-connection.h"
54 #include "valprint.h"
55 #include "cli/cli-decode.h"
56
57 static void generic_tls_error (void) ATTRIBUTE_NORETURN;
58
59 static void default_terminal_info (struct target_ops *, const char *, int);
60
61 static int default_watchpoint_addr_within_range (struct target_ops *,
62 CORE_ADDR, CORE_ADDR, int);
63
64 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
65 CORE_ADDR, int);
66
67 static void default_rcmd (struct target_ops *, const char *, struct ui_file *);
68
69 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
70 long lwp, long tid);
71
72 static void default_mourn_inferior (struct target_ops *self);
73
74 static int default_search_memory (struct target_ops *ops,
75 CORE_ADDR start_addr,
76 ULONGEST search_space_len,
77 const gdb_byte *pattern,
78 ULONGEST pattern_len,
79 CORE_ADDR *found_addrp);
80
81 static int default_verify_memory (struct target_ops *self,
82 const gdb_byte *data,
83 CORE_ADDR memaddr, ULONGEST size);
84
85 static void tcomplain (void) ATTRIBUTE_NORETURN;
86
87 static struct target_ops *find_default_run_target (const char *);
88
89 static int dummy_find_memory_regions (struct target_ops *self,
90 find_memory_region_ftype ignore1,
91 void *ignore2);
92
93 static gdb::unique_xmalloc_ptr<char> dummy_make_corefile_notes
94 (struct target_ops *self, bfd *ignore1, int *ignore2);
95
96 static std::string default_pid_to_str (struct target_ops *ops, ptid_t ptid);
97
98 static enum exec_direction_kind default_execution_direction
99 (struct target_ops *self);
100
101 /* Mapping between target_info objects (which have address identity)
102 and corresponding open/factory function/callback. Each add_target
103 call adds one entry to this map, and registers a "target
104 TARGET_NAME" command that when invoked calls the factory registered
105 here. The target_info object is associated with the command via
106 the command's context. */
107 static std::unordered_map<const target_info *, target_open_ftype *>
108 target_factories;
109
110 /* The singleton debug target. */
111
112 static struct target_ops *the_debug_target;
113
114 /* Command list for target. */
115
116 static struct cmd_list_element *targetlist = NULL;
117
118 /* True if we should trust readonly sections from the
119 executable when reading memory. */
120
121 static bool trust_readonly = false;
122
123 /* Nonzero if we should show true memory content, including
124 memory breakpoints inserted by GDB. */
125
126 static int show_memory_breakpoints = 0;
127
128 /* These globals control whether GDB attempts to perform these
129 operations; they are useful for targets that need to prevent
130 inadvertent disruption, such as in non-stop mode. */
131
132 bool may_write_registers = true;
133
134 bool may_write_memory = true;
135
136 bool may_insert_breakpoints = true;
137
138 bool may_insert_tracepoints = true;
139
140 bool may_insert_fast_tracepoints = true;
141
142 bool may_stop = true;
143
144 /* Non-zero if we want to see trace of target level stuff. */
145
146 static unsigned int targetdebug = 0;
147
148 static void
149 set_targetdebug (const char *args, int from_tty, struct cmd_list_element *c)
150 {
151 if (targetdebug)
152 current_inferior ()->push_target (the_debug_target);
153 else
154 current_inferior ()->unpush_target (the_debug_target);
155 }
156
157 static void
158 show_targetdebug (struct ui_file *file, int from_tty,
159 struct cmd_list_element *c, const char *value)
160 {
161 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
162 }
163
164 int
165 target_has_memory ()
166 {
167 for (target_ops *t = current_inferior ()->top_target ();
168 t != NULL;
169 t = t->beneath ())
170 if (t->has_memory ())
171 return 1;
172
173 return 0;
174 }
175
176 int
177 target_has_stack ()
178 {
179 for (target_ops *t = current_inferior ()->top_target ();
180 t != NULL;
181 t = t->beneath ())
182 if (t->has_stack ())
183 return 1;
184
185 return 0;
186 }
187
188 int
189 target_has_registers ()
190 {
191 for (target_ops *t = current_inferior ()->top_target ();
192 t != NULL;
193 t = t->beneath ())
194 if (t->has_registers ())
195 return 1;
196
197 return 0;
198 }
199
200 bool
201 target_has_execution (inferior *inf)
202 {
203 if (inf == nullptr)
204 inf = current_inferior ();
205
206 for (target_ops *t = inf->top_target ();
207 t != nullptr;
208 t = inf->find_target_beneath (t))
209 if (t->has_execution (inf))
210 return true;
211
212 return false;
213 }
214
215 const char *
216 target_shortname ()
217 {
218 return current_inferior ()->top_target ()->shortname ();
219 }
220
221 /* See target.h. */
222
223 bool
224 target_attach_no_wait ()
225 {
226 return current_inferior ()->top_target ()->attach_no_wait ();
227 }
228
229 /* See target.h. */
230
231 void
232 target_post_attach (int pid)
233 {
234 return current_inferior ()->top_target ()->post_attach (pid);
235 }
236
237 /* See target.h. */
238
239 void
240 target_prepare_to_store (regcache *regcache)
241 {
242 return current_inferior ()->top_target ()->prepare_to_store (regcache);
243 }
244
245 /* See target.h. */
246
247 bool
248 target_supports_enable_disable_tracepoint ()
249 {
250 target_ops *target = current_inferior ()->top_target ();
251
252 return target->supports_enable_disable_tracepoint ();
253 }
254
255 bool
256 target_supports_string_tracing ()
257 {
258 return current_inferior ()->top_target ()->supports_string_tracing ();
259 }
260
261 /* See target.h. */
262
263 bool
264 target_supports_evaluation_of_breakpoint_conditions ()
265 {
266 target_ops *target = current_inferior ()->top_target ();
267
268 return target->supports_evaluation_of_breakpoint_conditions ();
269 }
270
271 /* See target.h. */
272
273 bool
274 target_supports_dumpcore ()
275 {
276 return current_inferior ()->top_target ()->supports_dumpcore ();
277 }
278
279 /* See target.h. */
280
281 void
282 target_dumpcore (const char *filename)
283 {
284 return current_inferior ()->top_target ()->dumpcore (filename);
285 }
286
287 /* See target.h. */
288
289 bool
290 target_can_run_breakpoint_commands ()
291 {
292 return current_inferior ()->top_target ()->can_run_breakpoint_commands ();
293 }
294
295 /* See target.h. */
296
297 void
298 target_files_info ()
299 {
300 return current_inferior ()->top_target ()->files_info ();
301 }
302
303 /* See target.h. */
304
305 void
306 target_post_startup_inferior (ptid_t ptid)
307 {
308 return current_inferior ()->top_target ()->post_startup_inferior (ptid);
309 }
310
311 /* See target.h. */
312
313 int
314 target_insert_fork_catchpoint (int pid)
315 {
316 return current_inferior ()->top_target ()->insert_fork_catchpoint (pid);
317 }
318
319 /* See target.h. */
320
321 int
322 target_remove_fork_catchpoint (int pid)
323 {
324 return current_inferior ()->top_target ()->remove_fork_catchpoint (pid);
325 }
326
327 /* See target.h. */
328
329 int
330 target_insert_vfork_catchpoint (int pid)
331 {
332 return current_inferior ()->top_target ()->insert_vfork_catchpoint (pid);
333 }
334
335 /* See target.h. */
336
337 int
338 target_remove_vfork_catchpoint (int pid)
339 {
340 return current_inferior ()->top_target ()->remove_vfork_catchpoint (pid);
341 }
342
343 /* See target.h. */
344
345 int
346 target_insert_exec_catchpoint (int pid)
347 {
348 return current_inferior ()->top_target ()->insert_exec_catchpoint (pid);
349 }
350
351 /* See target.h. */
352
353 int
354 target_remove_exec_catchpoint (int pid)
355 {
356 return current_inferior ()->top_target ()->remove_exec_catchpoint (pid);
357 }
358
359 /* See target.h. */
360
361 int
362 target_set_syscall_catchpoint (int pid, bool needed, int any_count,
363 gdb::array_view<const int> syscall_counts)
364 {
365 target_ops *target = current_inferior ()->top_target ();
366
367 return target->set_syscall_catchpoint (pid, needed, any_count,
368 syscall_counts);
369 }
370
371 /* See target.h. */
372
373 void
374 target_rcmd (const char *command, struct ui_file *outbuf)
375 {
376 return current_inferior ()->top_target ()->rcmd (command, outbuf);
377 }
378
379 /* See target.h. */
380
381 bool
382 target_can_lock_scheduler ()
383 {
384 target_ops *target = current_inferior ()->top_target ();
385
386 return (target->get_thread_control_capabilities () & tc_schedlock) != 0;
387 }
388
389 /* See target.h. */
390
391 bool
392 target_can_async_p ()
393 {
394 return current_inferior ()->top_target ()->can_async_p ();
395 }
396
397 /* See target.h. */
398
399 bool
400 target_is_async_p ()
401 {
402 return current_inferior ()->top_target ()->is_async_p ();
403 }
404
405 exec_direction_kind
406 target_execution_direction ()
407 {
408 return current_inferior ()->top_target ()->execution_direction ();
409 }
410
411 /* See target.h. */
412
413 const char *
414 target_extra_thread_info (thread_info *tp)
415 {
416 return current_inferior ()->top_target ()->extra_thread_info (tp);
417 }
418
419 /* See target.h. */
420
421 char *
422 target_pid_to_exec_file (int pid)
423 {
424 return current_inferior ()->top_target ()->pid_to_exec_file (pid);
425 }
426
427 /* See target.h. */
428
429 gdbarch *
430 target_thread_architecture (ptid_t ptid)
431 {
432 return current_inferior ()->top_target ()->thread_architecture (ptid);
433 }
434
435 /* See target.h. */
436
437 int
438 target_find_memory_regions (find_memory_region_ftype func, void *data)
439 {
440 return current_inferior ()->top_target ()->find_memory_regions (func, data);
441 }
442
443 /* See target.h. */
444
445 gdb::unique_xmalloc_ptr<char>
446 target_make_corefile_notes (bfd *bfd, int *size_p)
447 {
448 return current_inferior ()->top_target ()->make_corefile_notes (bfd, size_p);
449 }
450
451 gdb_byte *
452 target_get_bookmark (const char *args, int from_tty)
453 {
454 return current_inferior ()->top_target ()->get_bookmark (args, from_tty);
455 }
456
457 void
458 target_goto_bookmark (const gdb_byte *arg, int from_tty)
459 {
460 return current_inferior ()->top_target ()->goto_bookmark (arg, from_tty);
461 }
462
463 /* See target.h. */
464
465 bool
466 target_stopped_by_watchpoint ()
467 {
468 return current_inferior ()->top_target ()->stopped_by_watchpoint ();
469 }
470
471 /* See target.h. */
472
473 bool
474 target_stopped_by_sw_breakpoint ()
475 {
476 return current_inferior ()->top_target ()->stopped_by_sw_breakpoint ();
477 }
478
479 bool
480 target_supports_stopped_by_sw_breakpoint ()
481 {
482 target_ops *target = current_inferior ()->top_target ();
483
484 return target->supports_stopped_by_sw_breakpoint ();
485 }
486
487 bool
488 target_stopped_by_hw_breakpoint ()
489 {
490 return current_inferior ()->top_target ()->stopped_by_hw_breakpoint ();
491 }
492
493 bool
494 target_supports_stopped_by_hw_breakpoint ()
495 {
496 target_ops *target = current_inferior ()->top_target ();
497
498 return target->supports_stopped_by_hw_breakpoint ();
499 }
500
501 /* See target.h. */
502
503 bool
504 target_have_steppable_watchpoint ()
505 {
506 return current_inferior ()->top_target ()->have_steppable_watchpoint ();
507 }
508
509 /* See target.h. */
510
511 int
512 target_can_use_hardware_watchpoint (bptype type, int cnt, int othertype)
513 {
514 target_ops *target = current_inferior ()->top_target ();
515
516 return target->can_use_hw_breakpoint (type, cnt, othertype);
517 }
518
519 /* See target.h. */
520
521 int
522 target_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
523 {
524 target_ops *target = current_inferior ()->top_target ();
525
526 return target->region_ok_for_hw_watchpoint (addr, len);
527 }
528
529
530 int
531 target_can_do_single_step ()
532 {
533 return current_inferior ()->top_target ()->can_do_single_step ();
534 }
535
536 /* See target.h. */
537
538 int
539 target_insert_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
540 expression *cond)
541 {
542 target_ops *target = current_inferior ()->top_target ();
543
544 return target->insert_watchpoint (addr, len, type, cond);
545 }
546
547 /* See target.h. */
548
549 int
550 target_remove_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
551 expression *cond)
552 {
553 target_ops *target = current_inferior ()->top_target ();
554
555 return target->remove_watchpoint (addr, len, type, cond);
556 }
557
558 /* See target.h. */
559
560 int
561 target_insert_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
562 {
563 target_ops *target = current_inferior ()->top_target ();
564
565 return target->insert_hw_breakpoint (gdbarch, bp_tgt);
566 }
567
568 /* See target.h. */
569
570 int
571 target_remove_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
572 {
573 target_ops *target = current_inferior ()->top_target ();
574
575 return target->remove_hw_breakpoint (gdbarch, bp_tgt);
576 }
577
578 /* See target.h. */
579
580 bool
581 target_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int type,
582 expression *cond)
583 {
584 target_ops *target = current_inferior ()->top_target ();
585
586 return target->can_accel_watchpoint_condition (addr, len, type, cond);
587 }
588
589 /* See target.h. */
590
591 bool
592 target_can_execute_reverse ()
593 {
594 return current_inferior ()->top_target ()->can_execute_reverse ();
595 }
596
597 ptid_t
598 target_get_ada_task_ptid (long lwp, long tid)
599 {
600 return current_inferior ()->top_target ()->get_ada_task_ptid (lwp, tid);
601 }
602
603 bool
604 target_filesystem_is_local ()
605 {
606 return current_inferior ()->top_target ()->filesystem_is_local ();
607 }
608
609 void
610 target_trace_init ()
611 {
612 return current_inferior ()->top_target ()->trace_init ();
613 }
614
615 void
616 target_download_tracepoint (bp_location *location)
617 {
618 return current_inferior ()->top_target ()->download_tracepoint (location);
619 }
620
621 bool
622 target_can_download_tracepoint ()
623 {
624 return current_inferior ()->top_target ()->can_download_tracepoint ();
625 }
626
627 void
628 target_download_trace_state_variable (const trace_state_variable &tsv)
629 {
630 target_ops *target = current_inferior ()->top_target ();
631
632 return target->download_trace_state_variable (tsv);
633 }
634
635 void
636 target_enable_tracepoint (bp_location *loc)
637 {
638 return current_inferior ()->top_target ()->enable_tracepoint (loc);
639 }
640
641 void
642 target_disable_tracepoint (bp_location *loc)
643 {
644 return current_inferior ()->top_target ()->disable_tracepoint (loc);
645 }
646
647 void
648 target_trace_start ()
649 {
650 return current_inferior ()->top_target ()->trace_start ();
651 }
652
653 void
654 target_trace_set_readonly_regions ()
655 {
656 return current_inferior ()->top_target ()->trace_set_readonly_regions ();
657 }
658
659 int
660 target_get_trace_status (trace_status *ts)
661 {
662 return current_inferior ()->top_target ()->get_trace_status (ts);
663 }
664
665 void
666 target_get_tracepoint_status (breakpoint *tp, uploaded_tp *utp)
667 {
668 return current_inferior ()->top_target ()->get_tracepoint_status (tp, utp);
669 }
670
671 void
672 target_trace_stop ()
673 {
674 return current_inferior ()->top_target ()->trace_stop ();
675 }
676
677 int
678 target_trace_find (trace_find_type type, int num,
679 CORE_ADDR addr1, CORE_ADDR addr2, int *tpp)
680 {
681 target_ops *target = current_inferior ()->top_target ();
682
683 return target->trace_find (type, num, addr1, addr2, tpp);
684 }
685
686 bool
687 target_get_trace_state_variable_value (int tsv, LONGEST *val)
688 {
689 target_ops *target = current_inferior ()->top_target ();
690
691 return target->get_trace_state_variable_value (tsv, val);
692 }
693
694 int
695 target_save_trace_data (const char *filename)
696 {
697 return current_inferior ()->top_target ()->save_trace_data (filename);
698 }
699
700 int
701 target_upload_tracepoints (uploaded_tp **utpp)
702 {
703 return current_inferior ()->top_target ()->upload_tracepoints (utpp);
704 }
705
706 int
707 target_upload_trace_state_variables (uploaded_tsv **utsvp)
708 {
709 target_ops *target = current_inferior ()->top_target ();
710
711 return target->upload_trace_state_variables (utsvp);
712 }
713
714 LONGEST
715 target_get_raw_trace_data (gdb_byte *buf, ULONGEST offset, LONGEST len)
716 {
717 target_ops *target = current_inferior ()->top_target ();
718
719 return target->get_raw_trace_data (buf, offset, len);
720 }
721
722 int
723 target_get_min_fast_tracepoint_insn_len ()
724 {
725 target_ops *target = current_inferior ()->top_target ();
726
727 return target->get_min_fast_tracepoint_insn_len ();
728 }
729
730 void
731 target_set_disconnected_tracing (int val)
732 {
733 return current_inferior ()->top_target ()->set_disconnected_tracing (val);
734 }
735
736 void
737 target_set_circular_trace_buffer (int val)
738 {
739 return current_inferior ()->top_target ()->set_circular_trace_buffer (val);
740 }
741
742 void
743 target_set_trace_buffer_size (LONGEST val)
744 {
745 return current_inferior ()->top_target ()->set_trace_buffer_size (val);
746 }
747
748 bool
749 target_set_trace_notes (const char *user, const char *notes,
750 const char *stopnotes)
751 {
752 target_ops *target = current_inferior ()->top_target ();
753
754 return target->set_trace_notes (user, notes, stopnotes);
755 }
756
757 bool
758 target_get_tib_address (ptid_t ptid, CORE_ADDR *addr)
759 {
760 return current_inferior ()->top_target ()->get_tib_address (ptid, addr);
761 }
762
763 void
764 target_set_permissions ()
765 {
766 return current_inferior ()->top_target ()->set_permissions ();
767 }
768
769 bool
770 target_static_tracepoint_marker_at (CORE_ADDR addr,
771 static_tracepoint_marker *marker)
772 {
773 target_ops *target = current_inferior ()->top_target ();
774
775 return target->static_tracepoint_marker_at (addr, marker);
776 }
777
778 std::vector<static_tracepoint_marker>
779 target_static_tracepoint_markers_by_strid (const char *marker_id)
780 {
781 target_ops *target = current_inferior ()->top_target ();
782
783 return target->static_tracepoint_markers_by_strid (marker_id);
784 }
785
786 traceframe_info_up
787 target_traceframe_info ()
788 {
789 return current_inferior ()->top_target ()->traceframe_info ();
790 }
791
792 bool
793 target_use_agent (bool use)
794 {
795 return current_inferior ()->top_target ()->use_agent (use);
796 }
797
798 bool
799 target_can_use_agent ()
800 {
801 return current_inferior ()->top_target ()->can_use_agent ();
802 }
803
804 bool
805 target_augmented_libraries_svr4_read ()
806 {
807 return current_inferior ()->top_target ()->augmented_libraries_svr4_read ();
808 }
809
810 bool
811 target_supports_memory_tagging ()
812 {
813 return current_inferior ()->top_target ()->supports_memory_tagging ();
814 }
815
816 bool
817 target_fetch_memtags (CORE_ADDR address, size_t len, gdb::byte_vector &tags,
818 int type)
819 {
820 return current_inferior ()->top_target ()->fetch_memtags (address, len, tags, type);
821 }
822
823 bool
824 target_store_memtags (CORE_ADDR address, size_t len,
825 const gdb::byte_vector &tags, int type)
826 {
827 return current_inferior ()->top_target ()->store_memtags (address, len, tags, type);
828 }
829
830 void
831 target_log_command (const char *p)
832 {
833 return current_inferior ()->top_target ()->log_command (p);
834 }
835
836 /* This is used to implement the various target commands. */
837
838 static void
839 open_target (const char *args, int from_tty, struct cmd_list_element *command)
840 {
841 auto *ti = static_cast<target_info *> (command->context ());
842 target_open_ftype *func = target_factories[ti];
843
844 if (targetdebug)
845 fprintf_unfiltered (gdb_stdlog, "-> %s->open (...)\n",
846 ti->shortname);
847
848 func (args, from_tty);
849
850 if (targetdebug)
851 fprintf_unfiltered (gdb_stdlog, "<- %s->open (%s, %d)\n",
852 ti->shortname, args, from_tty);
853 }
854
855 /* See target.h. */
856
857 void
858 add_target (const target_info &t, target_open_ftype *func,
859 completer_ftype *completer)
860 {
861 struct cmd_list_element *c;
862
863 auto &func_slot = target_factories[&t];
864 if (func_slot != nullptr)
865 internal_error (__FILE__, __LINE__,
866 _("target already added (\"%s\")."), t.shortname);
867 func_slot = func;
868
869 if (targetlist == NULL)
870 add_basic_prefix_cmd ("target", class_run, _("\
871 Connect to a target machine or process.\n\
872 The first argument is the type or protocol of the target machine.\n\
873 Remaining arguments are interpreted by the target protocol. For more\n\
874 information on the arguments for a particular protocol, type\n\
875 `help target ' followed by the protocol name."),
876 &targetlist, 0, &cmdlist);
877 c = add_cmd (t.shortname, no_class, t.doc, &targetlist);
878 c->set_context ((void *) &t);
879 set_cmd_sfunc (c, open_target);
880 if (completer != NULL)
881 set_cmd_completer (c, completer);
882 }
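
/* Example (hedged sketch): a target backend registers itself by calling
   add_target from its _initialize function.  The "foo" names below are
   hypothetical placeholders for a real backend's target_info, open
   callback and initializer, not an existing target.

     static const target_info foo_target_info = {
       "foo",				// shortname; enables "target foo"
       N_("Foo target"),		// longname
       N_("Debug using a foo connection.\nUsage: target foo HOST:PORT")
     };

     static void
     foo_target_open (const char *args, int from_tty)
     {
       // Parse ARGS, create the connection, push the target...
     }

     void
     _initialize_foo_target ()
     {
       add_target (foo_target_info, foo_target_open);
     }
*/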
883
884 /* See target.h. */
885
886 void
887 add_deprecated_target_alias (const target_info &tinfo, const char *alias)
888 {
889 struct cmd_list_element *c;
890 char *alt;
891
892 /* If we use add_alias_cmd here, we do not get the deprecated warning;
893 see PR cli/15104. */
894 c = add_cmd (alias, no_class, tinfo.doc, &targetlist);
895 set_cmd_sfunc (c, open_target);
896 c->set_context ((void *) &tinfo);
897 alt = xstrprintf ("target %s", tinfo.shortname);
898 deprecate_cmd (c, alt);
899 }
900
901 /* Stub functions */
902
903 void
904 target_kill (void)
905 {
906 current_inferior ()->top_target ()->kill ();
907 }
908
909 void
910 target_load (const char *arg, int from_tty)
911 {
912 target_dcache_invalidate ();
913 current_inferior ()->top_target ()->load (arg, from_tty);
914 }
915
916 /* Define it. */
917
918 target_terminal_state target_terminal::m_terminal_state
919 = target_terminal_state::is_ours;
920
921 /* See target/target.h. */
922
923 void
924 target_terminal::init (void)
925 {
926 current_inferior ()->top_target ()->terminal_init ();
927
928 m_terminal_state = target_terminal_state::is_ours;
929 }
930
931 /* See target/target.h. */
932
933 void
934 target_terminal::inferior (void)
935 {
936 struct ui *ui = current_ui;
937
938 /* A background resume (``run&'') should leave GDB in control of the
939 terminal. */
940 if (ui->prompt_state != PROMPT_BLOCKED)
941 return;
942
943 /* Since we always run the inferior in the main console (unless "set
944 inferior-tty" is in effect), when some UI other than the main one
945 calls target_terminal::inferior, then we leave the main UI's
946 terminal settings as is. */
947 if (ui != main_ui)
948 return;
949
950 /* If GDB is resuming the inferior in the foreground, install
951 inferior's terminal modes. */
952
953 struct inferior *inf = current_inferior ();
954
955 if (inf->terminal_state != target_terminal_state::is_inferior)
956 {
957 current_inferior ()->top_target ()->terminal_inferior ();
958 inf->terminal_state = target_terminal_state::is_inferior;
959 }
960
961 m_terminal_state = target_terminal_state::is_inferior;
962
963 /* If the user hit C-c before, pretend that it was hit right
964 here. */
965 if (check_quit_flag ())
966 target_pass_ctrlc ();
967 }
968
969 /* See target/target.h. */
970
971 void
972 target_terminal::restore_inferior (void)
973 {
974 struct ui *ui = current_ui;
975
976 /* See target_terminal::inferior(). */
977 if (ui->prompt_state != PROMPT_BLOCKED || ui != main_ui)
978 return;
979
980 /* Restore the terminal settings of inferiors that were in the
981 foreground but are now ours_for_output due to a temporary
982 target_terminal::ours_for_output() call. */
983
984 {
985 scoped_restore_current_inferior restore_inferior;
986
987 for (::inferior *inf : all_inferiors ())
988 {
989 if (inf->terminal_state == target_terminal_state::is_ours_for_output)
990 {
991 set_current_inferior (inf);
992 current_inferior ()->top_target ()->terminal_inferior ();
993 inf->terminal_state = target_terminal_state::is_inferior;
994 }
995 }
996 }
997
998 m_terminal_state = target_terminal_state::is_inferior;
999
1000 /* If the user hit C-c before, pretend that it was hit right
1001 here. */
1002 if (check_quit_flag ())
1003 target_pass_ctrlc ();
1004 }
1005
1006 /* Switch terminal state to DESIRED_STATE, either is_ours, or
1007 is_ours_for_output. */
1008
1009 static void
1010 target_terminal_is_ours_kind (target_terminal_state desired_state)
1011 {
1012 scoped_restore_current_inferior restore_inferior;
1013
1014 /* Must do this in two passes. First, have all inferiors save the
1015 current terminal settings. Then, after all inferiors have had a
1016 chance to safely save the terminal settings, restore GDB's
1017 terminal settings. */
1018
1019 for (inferior *inf : all_inferiors ())
1020 {
1021 if (inf->terminal_state == target_terminal_state::is_inferior)
1022 {
1023 set_current_inferior (inf);
1024 current_inferior ()->top_target ()->terminal_save_inferior ();
1025 }
1026 }
1027
1028 for (inferior *inf : all_inferiors ())
1029 {
1030 /* Note we don't check is_inferior here like above because we
1031 need to handle 'is_ours_for_output -> is_ours' too. Careful
1032 to never transition from 'is_ours' to 'is_ours_for_output',
1033 though. */
1034 if (inf->terminal_state != target_terminal_state::is_ours
1035 && inf->terminal_state != desired_state)
1036 {
1037 set_current_inferior (inf);
1038 if (desired_state == target_terminal_state::is_ours)
1039 current_inferior ()->top_target ()->terminal_ours ();
1040 else if (desired_state == target_terminal_state::is_ours_for_output)
1041 current_inferior ()->top_target ()->terminal_ours_for_output ();
1042 else
1043 gdb_assert_not_reached ("unhandled desired state");
1044 inf->terminal_state = desired_state;
1045 }
1046 }
1047 }
1048
1049 /* See target/target.h. */
1050
1051 void
1052 target_terminal::ours ()
1053 {
1054 struct ui *ui = current_ui;
1055
1056 /* See target_terminal::inferior. */
1057 if (ui != main_ui)
1058 return;
1059
1060 if (m_terminal_state == target_terminal_state::is_ours)
1061 return;
1062
1063 target_terminal_is_ours_kind (target_terminal_state::is_ours);
1064 m_terminal_state = target_terminal_state::is_ours;
1065 }
1066
1067 /* See target/target.h. */
1068
1069 void
1070 target_terminal::ours_for_output ()
1071 {
1072 struct ui *ui = current_ui;
1073
1074 /* See target_terminal::inferior. */
1075 if (ui != main_ui)
1076 return;
1077
1078 if (!target_terminal::is_inferior ())
1079 return;
1080
1081 target_terminal_is_ours_kind (target_terminal_state::is_ours_for_output);
1082 target_terminal::m_terminal_state = target_terminal_state::is_ours_for_output;
1083 }
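
/* Usage sketch (simplified; actual call sites vary): code that wants to
   print while the inferior owns the terminal in the foreground takes the
   terminal back for output only, and ownership returns to the inferior on
   the next foreground resume.

     target_terminal::ours_for_output ();
     printf_unfiltered (_("...some asynchronous notification...\n"));
     ...
     target_terminal::inferior ();	// when the inferior is resumed again
*/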
1084
1085 /* See target/target.h. */
1086
1087 void
1088 target_terminal::info (const char *arg, int from_tty)
1089 {
1090 current_inferior ()->top_target ()->terminal_info (arg, from_tty);
1091 }
1092
1093 /* See target.h. */
1094
1095 bool
1096 target_supports_terminal_ours (void)
1097 {
1098 /* The current top target is the target at the top of the target
1099 stack of the current inferior. While normally there's always an
1100 inferior, we must check for nullptr here because we can get here
1101 very early during startup, before the initial inferior is first
1102 created. */
1103 inferior *inf = current_inferior ();
1104
1105 if (inf == nullptr)
1106 return false;
1107 return inf->top_target ()->supports_terminal_ours ();
1108 }
1109
1110 static void
1111 tcomplain (void)
1112 {
1113 error (_("You can't do that when your target is `%s'"),
1114 current_inferior ()->top_target ()->shortname ());
1115 }
1116
1117 void
1118 noprocess (void)
1119 {
1120 error (_("You can't do that without a process to debug."));
1121 }
1122
1123 static void
1124 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
1125 {
1126 printf_unfiltered (_("No saved terminal information.\n"));
1127 }
1128
1129 /* A default implementation for the to_get_ada_task_ptid target method.
1130
1131 This function builds the PTID by using both LWP and TID as part of
1132 the PTID lwp and tid elements. The pid used is the pid of the
1133 inferior_ptid. */
1134
1135 static ptid_t
1136 default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
1137 {
1138 return ptid_t (inferior_ptid.pid (), lwp, tid);
1139 }
1140
1141 static enum exec_direction_kind
1142 default_execution_direction (struct target_ops *self)
1143 {
1144 if (!target_can_execute_reverse ())
1145 return EXEC_FORWARD;
1146 else if (!target_can_async_p ())
1147 return EXEC_FORWARD;
1148 else
1149 gdb_assert_not_reached ("\
1150 to_execution_direction must be implemented for reverse async");
1151 }
1152
1153 /* See target.h. */
1154
1155 void
1156 decref_target (target_ops *t)
1157 {
1158 t->decref ();
1159 if (t->refcount () == 0)
1160 {
1161 if (t->stratum () == process_stratum)
1162 connection_list_remove (as_process_stratum_target (t));
1163 target_close (t);
1164 }
1165 }
1166
1167 /* See target.h. */
1168
1169 void
1170 target_stack::push (target_ops *t)
1171 {
1172 t->incref ();
1173
1174 strata stratum = t->stratum ();
1175
1176 if (stratum == process_stratum)
1177 connection_list_add (as_process_stratum_target (t));
1178
1179 /* If there's already a target at this stratum, remove it. */
1180
1181 if (m_stack[stratum] != NULL)
1182 unpush (m_stack[stratum]);
1183
1184 /* Now add the new one. */
1185 m_stack[stratum] = t;
1186
1187 if (m_top < stratum)
1188 m_top = stratum;
1189 }
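
/* Worked example (hypothetical targets): with a file_stratum target and a
   process_stratum target pushed, m_top is process_stratum and
   top_target () returns the process target.  Pushing a second target at
   process_stratum first unpushes the existing one, since the stack holds
   at most one target per stratum.

     inferior *inf = current_inferior ();
     inf->push_target (some_file_stratum_target);	// e.g. an exec target
     inf->push_target (some_process_stratum_target);	// e.g. a remote target
*/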
1190
1191 /* See target.h. */
1192
1193 bool
1194 target_stack::unpush (target_ops *t)
1195 {
1196 gdb_assert (t != NULL);
1197
1198 strata stratum = t->stratum ();
1199
1200 if (stratum == dummy_stratum)
1201 internal_error (__FILE__, __LINE__,
1202 _("Attempt to unpush the dummy target"));
1203
1204 /* Look for the specified target. Note that a target can only occur
1205 once in the target stack. */
1206
1207 if (m_stack[stratum] != t)
1208 {
1209 /* If T wasn't pushed, quit. Only open targets should be
1210 closed. */
1211 return false;
1212 }
1213
1214 /* Unchain the target. */
1215 m_stack[stratum] = NULL;
1216
1217 if (m_top == stratum)
1218 m_top = this->find_beneath (t)->stratum ();
1219
1220 /* Finally close the target, if there are no inferiors
1221 referencing this target still. Note we do this after unchaining,
1222 so any target method calls from within the target_close
1223 implementation don't end up in T anymore. Do leave the target
1224 open if there are other inferiors referencing this target
1225 still. */
1226 decref_target (t);
1227
1228 return true;
1229 }
1230
1231 /* Unpush TARGET and assert that it worked. */
1232
1233 static void
1234 unpush_target_and_assert (struct target_ops *target)
1235 {
1236 if (!current_inferior ()->unpush_target (target))
1237 {
1238 fprintf_unfiltered (gdb_stderr,
1239 "pop_all_targets couldn't find target %s\n",
1240 target->shortname ());
1241 internal_error (__FILE__, __LINE__,
1242 _("failed internal consistency check"));
1243 }
1244 }
1245
1246 void
1247 pop_all_targets_above (enum strata above_stratum)
1248 {
1249 while ((int) (current_inferior ()->top_target ()->stratum ())
1250 > (int) above_stratum)
1251 unpush_target_and_assert (current_inferior ()->top_target ());
1252 }
1253
1254 /* See target.h. */
1255
1256 void
1257 pop_all_targets_at_and_above (enum strata stratum)
1258 {
1259 while ((int) (current_inferior ()->top_target ()->stratum ())
1260 >= (int) stratum)
1261 unpush_target_and_assert (current_inferior ()->top_target ());
1262 }
1263
1264 void
1265 pop_all_targets (void)
1266 {
1267 pop_all_targets_above (dummy_stratum);
1268 }
1269
1270 void
1271 target_unpusher::operator() (struct target_ops *ops) const
1272 {
1273 current_inferior ()->unpush_target (ops);
1274 }
1275
1276 /* Default implementation of to_get_thread_local_address. */
1277
1278 static void
1279 generic_tls_error (void)
1280 {
1281 throw_error (TLS_GENERIC_ERROR,
1282 _("Cannot find thread-local variables on this target"));
1283 }
1284
1285 /* Using the objfile specified in OBJFILE, find the address for the
1286 current thread's thread-local storage with offset OFFSET. */
1287 CORE_ADDR
1288 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1289 {
1290 volatile CORE_ADDR addr = 0;
1291 struct target_ops *target = current_inferior ()->top_target ();
1292 struct gdbarch *gdbarch = target_gdbarch ();
1293
1294 if (gdbarch_fetch_tls_load_module_address_p (gdbarch))
1295 {
1296 ptid_t ptid = inferior_ptid;
1297
1298 try
1299 {
1300 CORE_ADDR lm_addr;
1301
1302 /* Fetch the load module address for this objfile. */
1303 lm_addr = gdbarch_fetch_tls_load_module_address (gdbarch,
1304 objfile);
1305
1306 if (gdbarch_get_thread_local_address_p (gdbarch))
1307 addr = gdbarch_get_thread_local_address (gdbarch, ptid, lm_addr,
1308 offset);
1309 else
1310 addr = target->get_thread_local_address (ptid, lm_addr, offset);
1311 }
1312 /* If an error occurred, print TLS related messages here. Otherwise,
1313 throw the error to some higher catcher. */
1314 catch (const gdb_exception &ex)
1315 {
1316 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1317
1318 switch (ex.error)
1319 {
1320 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1321 error (_("Cannot find thread-local variables "
1322 "in this thread library."));
1323 break;
1324 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1325 if (objfile_is_library)
1326 error (_("Cannot find shared library `%s' in dynamic"
1327 " linker's load module list"), objfile_name (objfile));
1328 else
1329 error (_("Cannot find executable file `%s' in dynamic"
1330 " linker's load module list"), objfile_name (objfile));
1331 break;
1332 case TLS_NOT_ALLOCATED_YET_ERROR:
1333 if (objfile_is_library)
1334 error (_("The inferior has not yet allocated storage for"
1335 " thread-local variables in\n"
1336 "the shared library `%s'\n"
1337 "for %s"),
1338 objfile_name (objfile),
1339 target_pid_to_str (ptid).c_str ());
1340 else
1341 error (_("The inferior has not yet allocated storage for"
1342 " thread-local variables in\n"
1343 "the executable `%s'\n"
1344 "for %s"),
1345 objfile_name (objfile),
1346 target_pid_to_str (ptid).c_str ());
1347 break;
1348 case TLS_GENERIC_ERROR:
1349 if (objfile_is_library)
1350 error (_("Cannot find thread-local storage for %s, "
1351 "shared library %s:\n%s"),
1352 target_pid_to_str (ptid).c_str (),
1353 objfile_name (objfile), ex.what ());
1354 else
1355 error (_("Cannot find thread-local storage for %s, "
1356 "executable file %s:\n%s"),
1357 target_pid_to_str (ptid).c_str (),
1358 objfile_name (objfile), ex.what ());
1359 break;
1360 default:
1361 throw;
1362 break;
1363 }
1364 }
1365 }
1366 else
1367 error (_("Cannot find thread-local variables on this target"));
1368
1369 return addr;
1370 }
1371
1372 const char *
1373 target_xfer_status_to_string (enum target_xfer_status status)
1374 {
1375 #define CASE(X) case X: return #X
1376 switch (status)
1377 {
1378 CASE(TARGET_XFER_E_IO);
1379 CASE(TARGET_XFER_UNAVAILABLE);
1380 default:
1381 return "<unknown>";
1382 }
1383 #undef CASE
1384 };
1385
1386
1387 /* See target.h. */
1388
1389 gdb::unique_xmalloc_ptr<char>
1390 target_read_string (CORE_ADDR memaddr, int len, int *bytes_read)
1391 {
1392 gdb::unique_xmalloc_ptr<gdb_byte> buffer;
1393
1394 int ignore;
1395 if (bytes_read == nullptr)
1396 bytes_read = &ignore;
1397
1398 /* Note that the endian-ness does not matter here. */
1399 int errcode = read_string (memaddr, -1, 1, len, BFD_ENDIAN_LITTLE,
1400 &buffer, bytes_read);
1401 if (errcode != 0)
1402 return {};
1403
1404 return gdb::unique_xmalloc_ptr<char> ((char *) buffer.release ());
1405 }
1406
1407 const target_section_table *
1408 target_get_section_table (struct target_ops *target)
1409 {
1410 return target->get_section_table ();
1411 }
1412
1413 /* Find a section containing ADDR. */
1414
1415 const struct target_section *
1416 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1417 {
1418 const target_section_table *table = target_get_section_table (target);
1419
1420 if (table == NULL)
1421 return NULL;
1422
1423 for (const target_section &secp : *table)
1424 {
1425 if (addr >= secp.addr && addr < secp.endaddr)
1426 return &secp;
1427 }
1428 return NULL;
1429 }
1430
1431 /* See target.h. */
1432
1433 const target_section_table *
1434 default_get_section_table ()
1435 {
1436 return &current_program_space->target_sections ();
1437 }
1438
1439 /* Helper for the memory xfer routines. Checks the attributes of the
1440 memory region of MEMADDR against the read or write being attempted.
1441 If the access is permitted, returns true; otherwise returns false.
1442 REGION_P is an optional output parameter. If not-NULL, it is
1443 filled with a pointer to the memory region of MEMADDR. REG_LEN
1444 returns LEN trimmed to the end of the region. This is how much the
1445 caller can continue requesting, if the access is permitted. A
1446 single xfer request must not straddle memory region boundaries. */
1447
1448 static int
1449 memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
1450 ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
1451 struct mem_region **region_p)
1452 {
1453 struct mem_region *region;
1454
1455 region = lookup_mem_region (memaddr);
1456
1457 if (region_p != NULL)
1458 *region_p = region;
1459
1460 switch (region->attrib.mode)
1461 {
1462 case MEM_RO:
1463 if (writebuf != NULL)
1464 return 0;
1465 break;
1466
1467 case MEM_WO:
1468 if (readbuf != NULL)
1469 return 0;
1470 break;
1471
1472 case MEM_FLASH:
1473 /* We only support writing to flash during "load" for now. */
1474 if (writebuf != NULL)
1475 error (_("Writing to flash memory forbidden in this context"));
1476 break;
1477
1478 case MEM_NONE:
1479 return 0;
1480 }
1481
1482 /* region->hi == 0 means there's no upper bound. */
1483 if (memaddr + len < region->hi || region->hi == 0)
1484 *reg_len = len;
1485 else
1486 *reg_len = region->hi - memaddr;
1487
1488 return 1;
1489 }
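
/* Worked example (assumed numbers): for a region spanning
   [0x1000, 0x2000) and a request with MEMADDR 0x1f00 and LEN 0x400, the
   request straddles the region end, so *REG_LEN is trimmed to
   region->hi - memaddr = 0x2000 - 0x1f00 = 0x100.  The caller can later
   re-request the remaining 0x300 bytes, which are then checked against
   the following region.  */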
1490
1491 /* Read memory from more than one valid target. A core file, for
1492 instance, could have some of the memory but delegate other bits to
1493 the target below it. So, we must manually try all targets. */
1494
1495 enum target_xfer_status
1496 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1497 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1498 ULONGEST *xfered_len)
1499 {
1500 enum target_xfer_status res;
1501
1502 do
1503 {
1504 res = ops->xfer_partial (TARGET_OBJECT_MEMORY, NULL,
1505 readbuf, writebuf, memaddr, len,
1506 xfered_len);
1507 if (res == TARGET_XFER_OK)
1508 break;
1509
1510 /* Stop if the target reports that the memory is not available. */
1511 if (res == TARGET_XFER_UNAVAILABLE)
1512 break;
1513
1514 /* Don't continue past targets which have all the memory.
1515 At one time, this code was necessary to read data from
1516 executables / shared libraries when data for the requested
1517 addresses weren't available in the core file. But now the
1518 core target handles this case itself. */
1519 if (ops->has_all_memory ())
1520 break;
1521
1522 ops = ops->beneath ();
1523 }
1524 while (ops != NULL);
1525
1526 /* The cache works at the raw memory level. Make sure the cache
1527 gets updated with raw contents no matter what kind of memory
1528 object was originally being written. Note we do write-through
1529 first, so that if it fails, we don't write to the cache contents
1530 that never made it to the target. */
1531 if (writebuf != NULL
1532 && inferior_ptid != null_ptid
1533 && target_dcache_init_p ()
1534 && (stack_cache_enabled_p () || code_cache_enabled_p ()))
1535 {
1536 DCACHE *dcache = target_dcache_get ();
1537
1538 /* Note that writing to an area of memory which wasn't present
1539 in the cache doesn't cause it to be loaded in. */
1540 dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
1541 }
1542
1543 return res;
1544 }
1545
1546 /* Perform a partial memory transfer.
1547 For docs see target.h, to_xfer_partial. */
1548
1549 static enum target_xfer_status
1550 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1551 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1552 ULONGEST len, ULONGEST *xfered_len)
1553 {
1554 enum target_xfer_status res;
1555 ULONGEST reg_len;
1556 struct mem_region *region;
1557 struct inferior *inf;
1558
1559 /* For accesses to unmapped overlay sections, read directly from
1560 files. Must do this first, as MEMADDR may need adjustment. */
1561 if (readbuf != NULL && overlay_debugging)
1562 {
1563 struct obj_section *section = find_pc_overlay (memaddr);
1564
1565 if (pc_in_unmapped_range (memaddr, section))
1566 {
1567 const target_section_table *table = target_get_section_table (ops);
1568 const char *section_name = section->the_bfd_section->name;
1569
1570 memaddr = overlay_mapped_address (memaddr, section);
1571
1572 auto match_cb = [=] (const struct target_section *s)
1573 {
1574 return (strcmp (section_name, s->the_bfd_section->name) == 0);
1575 };
1576
1577 return section_table_xfer_memory_partial (readbuf, writebuf,
1578 memaddr, len, xfered_len,
1579 *table, match_cb);
1580 }
1581 }
1582
1583 /* Try the executable files, if "trust-readonly-sections" is set. */
1584 if (readbuf != NULL && trust_readonly)
1585 {
1586 const struct target_section *secp
1587 = target_section_by_addr (ops, memaddr);
1588 if (secp != NULL
1589 && (bfd_section_flags (secp->the_bfd_section) & SEC_READONLY))
1590 {
1591 const target_section_table *table = target_get_section_table (ops);
1592 return section_table_xfer_memory_partial (readbuf, writebuf,
1593 memaddr, len, xfered_len,
1594 *table);
1595 }
1596 }
1597
1598 /* Try GDB's internal data cache. */
1599
1600 if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
1601 &region))
1602 return TARGET_XFER_E_IO;
1603
1604 if (inferior_ptid != null_ptid)
1605 inf = current_inferior ();
1606 else
1607 inf = NULL;
1608
1609 if (inf != NULL
1610 && readbuf != NULL
1611 /* The dcache reads whole cache lines; that doesn't play well
1612 with reading from a trace buffer, because reading outside of
1613 the collected memory range fails. */
1614 && get_traceframe_number () == -1
1615 && (region->attrib.cache
1616 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1617 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1618 {
1619 DCACHE *dcache = target_dcache_get_or_init ();
1620
1621 return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
1622 reg_len, xfered_len);
1623 }
1624
1625 /* If none of those methods found the memory we wanted, fall back
1626 to a target partial transfer. Normally a single call to
1627 to_xfer_partial is enough; if it doesn't recognize an object
1628 it will call the to_xfer_partial of the next target down.
1629 But for memory this won't do. Memory is the only target
1630 object which can be read from more than one valid target.
1631 A core file, for instance, could have some of the memory but
1632 delegate other bits to the target below it. So, we must
1633 manually try all targets. */
1634
1635 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1636 xfered_len);
1637
1638 /* If we still haven't got anything, return the last error. We
1639 give up. */
1640 return res;
1641 }
1642
1643 /* Perform a partial memory transfer. For docs see target.h,
1644 to_xfer_partial. */
1645
1646 static enum target_xfer_status
1647 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1648 gdb_byte *readbuf, const gdb_byte *writebuf,
1649 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1650 {
1651 enum target_xfer_status res;
1652
1653 /* Zero length requests are ok and require no work. */
1654 if (len == 0)
1655 return TARGET_XFER_EOF;
1656
1657 memaddr = address_significant (target_gdbarch (), memaddr);
1658
1659 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1660 breakpoint insns, thus hiding out from higher layers whether
1661 there are software breakpoints inserted in the code stream. */
1662 if (readbuf != NULL)
1663 {
1664 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1665 xfered_len);
1666
1667 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1668 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
1669 }
1670 else
1671 {
1672 /* A large write request is likely to be partially satisfied
1673 by memory_xfer_partial_1. We will continually malloc
1674 and free a copy of the entire write request for breakpoint
1675 shadow handling even though we only end up writing a small
1676 subset of it. Cap writes to a limit specified by the target
1677 to mitigate this. */
1678 len = std::min (ops->get_memory_xfer_limit (), len);
1679
1680 gdb::byte_vector buf (writebuf, writebuf + len);
1681 breakpoint_xfer_memory (NULL, buf.data (), writebuf, memaddr, len);
1682 res = memory_xfer_partial_1 (ops, object, NULL, buf.data (), memaddr, len,
1683 xfered_len);
1684 }
1685
1686 return res;
1687 }
1688
1689 scoped_restore_tmpl<int>
1690 make_scoped_restore_show_memory_breakpoints (int show)
1691 {
1692 return make_scoped_restore (&show_memory_breakpoints, show);
1693 }
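
/* Usage sketch (MEMADDR, BUF and LEN stand for a caller's own values):
   temporarily read memory with the breakpoint instructions GDB inserted
   left visible; the previous setting is restored when RESTORE goes out
   of scope.

     {
       scoped_restore_tmpl<int> restore
         = make_scoped_restore_show_memory_breakpoints (1);
       target_read_memory (memaddr, buf, len);	// breakpoint shadows not applied
     }
*/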
1694
1695 /* For docs see target.h, to_xfer_partial. */
1696
1697 enum target_xfer_status
1698 target_xfer_partial (struct target_ops *ops,
1699 enum target_object object, const char *annex,
1700 gdb_byte *readbuf, const gdb_byte *writebuf,
1701 ULONGEST offset, ULONGEST len,
1702 ULONGEST *xfered_len)
1703 {
1704 enum target_xfer_status retval;
1705
1706 /* Transfer is done when LEN is zero. */
1707 if (len == 0)
1708 return TARGET_XFER_EOF;
1709
1710 if (writebuf && !may_write_memory)
1711 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1712 core_addr_to_string_nz (offset), plongest (len));
1713
1714 *xfered_len = 0;
1715
1716 /* If this is a memory transfer, let the memory-specific code
1717 have a look at it instead. Memory transfers are more
1718 complicated. */
1719 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1720 || object == TARGET_OBJECT_CODE_MEMORY)
1721 retval = memory_xfer_partial (ops, object, readbuf,
1722 writebuf, offset, len, xfered_len);
1723 else if (object == TARGET_OBJECT_RAW_MEMORY)
1724 {
1725 /* Skip/avoid accessing the target if the memory region
1726 attributes block the access. Check this here instead of in
1727 raw_memory_xfer_partial as otherwise we'd end up checking
1728 this twice in case the memory_xfer_partial path is
1729 taken; once before checking the dcache, and again in the
1730 tail call to raw_memory_xfer_partial. */
1731 if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
1732 NULL))
1733 return TARGET_XFER_E_IO;
1734
1735 /* Request the normal memory object from other layers. */
1736 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1737 xfered_len);
1738 }
1739 else
1740 retval = ops->xfer_partial (object, annex, readbuf,
1741 writebuf, offset, len, xfered_len);
1742
1743 if (targetdebug)
1744 {
1745 const unsigned char *myaddr = NULL;
1746
1747 fprintf_unfiltered (gdb_stdlog,
1748 "%s:target_xfer_partial "
1749 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1750 ops->shortname (),
1751 (int) object,
1752 (annex ? annex : "(null)"),
1753 host_address_to_string (readbuf),
1754 host_address_to_string (writebuf),
1755 core_addr_to_string_nz (offset),
1756 pulongest (len), retval,
1757 pulongest (*xfered_len));
1758
1759 if (readbuf)
1760 myaddr = readbuf;
1761 if (writebuf)
1762 myaddr = writebuf;
1763 if (retval == TARGET_XFER_OK && myaddr != NULL)
1764 {
1765 int i;
1766
1767 fputs_unfiltered (", bytes =", gdb_stdlog);
1768 for (i = 0; i < *xfered_len; i++)
1769 {
1770 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1771 {
1772 if (targetdebug < 2 && i > 0)
1773 {
1774 fprintf_unfiltered (gdb_stdlog, " ...");
1775 break;
1776 }
1777 fprintf_unfiltered (gdb_stdlog, "\n");
1778 }
1779
1780 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1781 }
1782 }
1783
1784 fputc_unfiltered ('\n', gdb_stdlog);
1785 }
1786
1787 /* Check that implementations of to_xfer_partial update *XFERED_LEN
1788 properly. Do the assertion after printing debug messages, so that we
1789 can find more clues about an assertion failure from the debug messages. */
1790 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
1791 gdb_assert (*xfered_len > 0);
1792
1793 return retval;
1794 }
1795
1796 /* Read LEN bytes of target memory at address MEMADDR, placing the
1797 results in GDB's memory at MYADDR. Returns either 0 for success or
1798 -1 if any error occurs.
1799
1800 If an error occurs, no guarantee is made about the contents of the data at
1801 MYADDR. In particular, the caller should not depend upon partial reads
1802 filling the buffer with good data. There is no way for the caller to know
1803 how much good data might have been transferred anyway. Callers that can
1804 deal with partial reads should call target_read (which will retry until
1805 it makes no progress, and then return how much was transferred). */
1806
1807 int
1808 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1809 {
1810 if (target_read (current_inferior ()->top_target (),
1811 TARGET_OBJECT_MEMORY, NULL,
1812 myaddr, memaddr, len) == len)
1813 return 0;
1814 else
1815 return -1;
1816 }
1817
1818 /* See target/target.h. */
1819
1820 int
1821 target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
1822 {
1823 gdb_byte buf[4];
1824 int r;
1825
1826 r = target_read_memory (memaddr, buf, sizeof buf);
1827 if (r != 0)
1828 return r;
1829 *result = extract_unsigned_integer (buf, sizeof buf,
1830 gdbarch_byte_order (target_gdbarch ()));
1831 return 0;
1832 }
1833
1834 /* Like target_read_memory, but specify explicitly that this is a read
1835 from the target's raw memory. That is, this read bypasses the
1836 dcache, breakpoint shadowing, etc. */
1837
1838 int
1839 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1840 {
1841 if (target_read (current_inferior ()->top_target (),
1842 TARGET_OBJECT_RAW_MEMORY, NULL,
1843 myaddr, memaddr, len) == len)
1844 return 0;
1845 else
1846 return -1;
1847 }
1848
1849 /* Like target_read_memory, but specify explicitly that this is a read from
1850 the target's stack. This may trigger different cache behavior. */
1851
1852 int
1853 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1854 {
1855 if (target_read (current_inferior ()->top_target (),
1856 TARGET_OBJECT_STACK_MEMORY, NULL,
1857 myaddr, memaddr, len) == len)
1858 return 0;
1859 else
1860 return -1;
1861 }
1862
1863 /* Like target_read_memory, but specify explicitly that this is a read from
1864 the target's code. This may trigger different cache behavior. */
1865
1866 int
1867 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1868 {
1869 if (target_read (current_inferior ()->top_target (),
1870 TARGET_OBJECT_CODE_MEMORY, NULL,
1871 myaddr, memaddr, len) == len)
1872 return 0;
1873 else
1874 return -1;
1875 }
1876
1877 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1878 Returns either 0 for success or -1 if any error occurs. If an
1879 error occurs, no guarantee is made about how much data got written.
1880 Callers that can deal with partial writes should call
1881 target_write. */
1882
1883 int
1884 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1885 {
1886 if (target_write (current_inferior ()->top_target (),
1887 TARGET_OBJECT_MEMORY, NULL,
1888 myaddr, memaddr, len) == len)
1889 return 0;
1890 else
1891 return -1;
1892 }
1893
1894 /* Write LEN bytes from MYADDR to target raw memory at address
1895 MEMADDR. Returns either 0 for success or -1 if any error occurs.
1896 If an error occurs, no guarantee is made about how much data got
1897 written. Callers that can deal with partial writes should call
1898 target_write. */
1899
1900 int
1901 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1902 {
1903 if (target_write (current_inferior ()->top_target (),
1904 TARGET_OBJECT_RAW_MEMORY, NULL,
1905 myaddr, memaddr, len) == len)
1906 return 0;
1907 else
1908 return -1;
1909 }
1910
1911 /* Fetch the target's memory map. */
1912
1913 std::vector<mem_region>
1914 target_memory_map (void)
1915 {
1916 target_ops *target = current_inferior ()->top_target ();
1917 std::vector<mem_region> result = target->memory_map ();
1918 if (result.empty ())
1919 return result;
1920
1921 std::sort (result.begin (), result.end ());
1922
1923 /* Check that regions do not overlap. Simultaneously assign
1924 a numbering for the "mem" commands to use to refer to
1925 each region. */
1926 mem_region *last_one = NULL;
1927 for (size_t ix = 0; ix < result.size (); ix++)
1928 {
1929 mem_region *this_one = &result[ix];
1930 this_one->number = ix;
1931
1932 if (last_one != NULL && last_one->hi > this_one->lo)
1933 {
1934 warning (_("Overlapping regions in memory map: ignoring"));
1935 return std::vector<mem_region> ();
1936 }
1937
1938 last_one = this_one;
1939 }
1940
1941 return result;
1942 }
1943
1944 void
1945 target_flash_erase (ULONGEST address, LONGEST length)
1946 {
1947 current_inferior ()->top_target ()->flash_erase (address, length);
1948 }
1949
1950 void
1951 target_flash_done (void)
1952 {
1953 current_inferior ()->top_target ()->flash_done ();
1954 }
1955
1956 static void
1957 show_trust_readonly (struct ui_file *file, int from_tty,
1958 struct cmd_list_element *c, const char *value)
1959 {
1960 fprintf_filtered (file,
1961 _("Mode for reading from readonly sections is %s.\n"),
1962 value);
1963 }
1964
1965 /* Target vector read/write partial wrapper functions. */
1966
1967 static enum target_xfer_status
1968 target_read_partial (struct target_ops *ops,
1969 enum target_object object,
1970 const char *annex, gdb_byte *buf,
1971 ULONGEST offset, ULONGEST len,
1972 ULONGEST *xfered_len)
1973 {
1974 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1975 xfered_len);
1976 }
1977
1978 static enum target_xfer_status
1979 target_write_partial (struct target_ops *ops,
1980 enum target_object object,
1981 const char *annex, const gdb_byte *buf,
1982 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1983 {
1984 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1985 xfered_len);
1986 }
1987
1988 /* Wrappers to perform the full transfer. */
1989
1990 /* For docs on target_read see target.h. */
1991
1992 LONGEST
1993 target_read (struct target_ops *ops,
1994 enum target_object object,
1995 const char *annex, gdb_byte *buf,
1996 ULONGEST offset, LONGEST len)
1997 {
1998 LONGEST xfered_total = 0;
1999 int unit_size = 1;
2000
2001 /* If we are reading from a memory object, find the length of an addressable
2002 unit for that architecture. */
2003 if (object == TARGET_OBJECT_MEMORY
2004 || object == TARGET_OBJECT_STACK_MEMORY
2005 || object == TARGET_OBJECT_CODE_MEMORY
2006 || object == TARGET_OBJECT_RAW_MEMORY)
2007 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2008
2009 while (xfered_total < len)
2010 {
2011 ULONGEST xfered_partial;
2012 enum target_xfer_status status;
2013
2014 status = target_read_partial (ops, object, annex,
2015 buf + xfered_total * unit_size,
2016 offset + xfered_total, len - xfered_total,
2017 &xfered_partial);
2018
2019 /* Call an observer, notifying them of the xfer progress? */
2020 if (status == TARGET_XFER_EOF)
2021 return xfered_total;
2022 else if (status == TARGET_XFER_OK)
2023 {
2024 xfered_total += xfered_partial;
2025 QUIT;
2026 }
2027 else
2028 return TARGET_XFER_E_IO;
2029
2030 }
2031 return len;
2032 }
2033
2034 /* Assuming that the entire [begin, end) range of memory cannot be
2035 read, try to read whatever subrange is possible to read.
2036
2037 The function returns, in RESULT, either zero or one memory block.
2038 If there's a readable subrange at the beginning, it is completely
2039 read and returned. Any further readable subrange will not be read.
2040 Otherwise, if there's a readable subrange at the end, it will be
2041 completely read and returned. Any readable subranges before it
2042 (obviously not starting at the beginning) will be ignored. In
2043 other cases -- either no readable subrange, or readable subrange(s)
2044 that are neither at the beginning nor at the end -- nothing is returned.
2045
2046 The purpose of this function is to handle a read across a boundary
2047 of accessible memory in the case when a memory map is not available.
2048 The above restrictions are fine for this case, but will give
2049 incorrect results if the memory is 'patchy'. However, supporting
2050 'patchy' memory would require trying to read every single byte,
2051 and that seems like an unacceptable solution. An explicit memory
2052 map is recommended for this case -- read_memory_robust will then
2053 take care of reading multiple ranges. */
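/* Worked example of the heuristic above (addresses are hypothetical):
   suppose [0x1000, 0x2000) failed to read as a whole and only
   [0x1000, 0x1800) is actually accessible. The first byte at 0x1000
   reads fine, so we search forward, repeatedly reading the first half
   of the still-suspect range: [0x1001, 0x1800) succeeds, then
   [0x1800, 0x1c00) fails, [0x1800, 0x1a00) fails, and so on, halving
   the suspect range until it is the single byte at 0x1800. The
   readable prefix starting at 0x1000 is then returned as one
   memory_read_result block. */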
2054
2055 static void
2056 read_whatever_is_readable (struct target_ops *ops,
2057 const ULONGEST begin, const ULONGEST end,
2058 int unit_size,
2059 std::vector<memory_read_result> *result)
2060 {
2061 ULONGEST current_begin = begin;
2062 ULONGEST current_end = end;
2063 int forward;
2064 ULONGEST xfered_len;
2065
2066 /* If we previously failed to read 1 byte, nothing can be done here. */
2067 if (end - begin <= 1)
2068 return;
2069
2070 gdb::unique_xmalloc_ptr<gdb_byte> buf ((gdb_byte *) xmalloc (end - begin));
2071
2072 /* Check that either the first or the last byte is readable, and give up
2073 if not. This heuristic is meant to permit reading accessible memory
2074 at the boundary of an accessible region. */
2075 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2076 buf.get (), begin, 1, &xfered_len) == TARGET_XFER_OK)
2077 {
2078 forward = 1;
2079 ++current_begin;
2080 }
2081 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2082 buf.get () + (end - begin) - 1, end - 1, 1,
2083 &xfered_len) == TARGET_XFER_OK)
2084 {
2085 forward = 0;
2086 --current_end;
2087 }
2088 else
2089 return;
2090
2091 /* Loop invariant: the range [current_begin, current_end) was previously
2092 found to be not readable as a whole.
2093
2094 Note the loop condition -- if the range is down to 1 byte, we can't
2095 divide it any further, so there's no point trying. */
2096 while (current_end - current_begin > 1)
2097 {
2098 ULONGEST first_half_begin, first_half_end;
2099 ULONGEST second_half_begin, second_half_end;
2100 LONGEST xfer;
2101 ULONGEST middle = current_begin + (current_end - current_begin) / 2;
2102
2103 if (forward)
2104 {
2105 first_half_begin = current_begin;
2106 first_half_end = middle;
2107 second_half_begin = middle;
2108 second_half_end = current_end;
2109 }
2110 else
2111 {
2112 first_half_begin = middle;
2113 first_half_end = current_end;
2114 second_half_begin = current_begin;
2115 second_half_end = middle;
2116 }
2117
2118 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2119 buf.get () + (first_half_begin - begin) * unit_size,
2120 first_half_begin,
2121 first_half_end - first_half_begin);
2122
2123 if (xfer == first_half_end - first_half_begin)
2124 {
2125 /* This half reads up fine. So, the error must be in the
2126 other half. */
2127 current_begin = second_half_begin;
2128 current_end = second_half_end;
2129 }
2130 else
2131 {
2132 /* This half is not readable. Because we've tried one byte, we
2133 know some part of this half is actually readable. Go to the next
2134 iteration to divide again and try to read.
2135
2136 We don't handle the other half, because this function only tries
2137 to read a single readable subrange. */
2138 current_begin = first_half_begin;
2139 current_end = first_half_end;
2140 }
2141 }
2142
2143 if (forward)
2144 {
2145 /* The [begin, current_begin) range has been read. */
2146 result->emplace_back (begin, current_begin, std::move (buf));
2147 }
2148 else
2149 {
2150 /* The [current_end, end) range has been read. */
2151 LONGEST region_len = end - current_end;
2152
2153 gdb::unique_xmalloc_ptr<gdb_byte> data
2154 ((gdb_byte *) xmalloc (region_len * unit_size));
2155 memcpy (data.get (), buf.get () + (current_end - begin) * unit_size,
2156 region_len * unit_size);
2157 result->emplace_back (current_end, end, std::move (data));
2158 }
2159 }
2160
2161 std::vector<memory_read_result>
2162 read_memory_robust (struct target_ops *ops,
2163 const ULONGEST offset, const LONGEST len)
2164 {
2165 std::vector<memory_read_result> result;
2166 int unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2167
2168 LONGEST xfered_total = 0;
2169 while (xfered_total < len)
2170 {
2171 struct mem_region *region = lookup_mem_region (offset + xfered_total);
2172 LONGEST region_len;
2173
2174 /* If there is no explicit region, a fake one should be created. */
2175 gdb_assert (region);
2176
2177 if (region->hi == 0)
2178 region_len = len - xfered_total;
2179 else
2180 region_len = region->hi - offset;
2181
2182 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2183 {
2184 /* Cannot read this region. Note that we can end up here only
2185 if the region is explicitly marked inaccessible, or
2186 'inaccessible-by-default' is in effect. */
2187 xfered_total += region_len;
2188 }
2189 else
2190 {
2191 LONGEST to_read = std::min (len - xfered_total, region_len);
2192 gdb::unique_xmalloc_ptr<gdb_byte> buffer
2193 ((gdb_byte *) xmalloc (to_read * unit_size));
2194
2195 LONGEST xfered_partial =
2196 target_read (ops, TARGET_OBJECT_MEMORY, NULL, buffer.get (),
2197 offset + xfered_total, to_read);
2198 /* Call an observer, notifying them of the xfer progress? */
2199 if (xfered_partial <= 0)
2200 {
2201 /* Got an error reading the full chunk. See if maybe we can read
2202 some subrange. */
2203 read_whatever_is_readable (ops, offset + xfered_total,
2204 offset + xfered_total + to_read,
2205 unit_size, &result);
2206 xfered_total += to_read;
2207 }
2208 else
2209 {
2210 result.emplace_back (offset + xfered_total,
2211 offset + xfered_total + xfered_partial,
2212 std::move (buffer));
2213 xfered_total += xfered_partial;
2214 }
2215 QUIT;
2216 }
2217 }
2218
2219 return result;
2220 }
2221
2222
2223 /* An alternative to target_write with progress callbacks. */
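/* A minimal sketch of a PROGRESS callback (hypothetical, not an actual
   GDB callback):

     static void
     count_units_written (ULONGEST just_written, void *baton)
     {
       ULONGEST *total = (ULONGEST *) baton;
       *total += just_written;
     }

   The callback is invoked once with 0 before the first transfer so it
   can set itself up, and then after each successful partial write with
   the number of addressable units just transferred. */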
2224
2225 LONGEST
2226 target_write_with_progress (struct target_ops *ops,
2227 enum target_object object,
2228 const char *annex, const gdb_byte *buf,
2229 ULONGEST offset, LONGEST len,
2230 void (*progress) (ULONGEST, void *), void *baton)
2231 {
2232 LONGEST xfered_total = 0;
2233 int unit_size = 1;
2234
2235 /* If we are writing to a memory object, find the length of an addressable
2236 unit for that architecture. */
2237 if (object == TARGET_OBJECT_MEMORY
2238 || object == TARGET_OBJECT_STACK_MEMORY
2239 || object == TARGET_OBJECT_CODE_MEMORY
2240 || object == TARGET_OBJECT_RAW_MEMORY)
2241 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2242
2243 /* Give the progress callback a chance to set up. */
2244 if (progress)
2245 (*progress) (0, baton);
2246
2247 while (xfered_total < len)
2248 {
2249 ULONGEST xfered_partial;
2250 enum target_xfer_status status;
2251
2252 status = target_write_partial (ops, object, annex,
2253 buf + xfered_total * unit_size,
2254 offset + xfered_total, len - xfered_total,
2255 &xfered_partial);
2256
2257 if (status != TARGET_XFER_OK)
2258 return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO;
2259
2260 if (progress)
2261 (*progress) (xfered_partial, baton);
2262
2263 xfered_total += xfered_partial;
2264 QUIT;
2265 }
2266 return len;
2267 }
2268
2269 /* For docs on target_write see target.h. */
2270
2271 LONGEST
2272 target_write (struct target_ops *ops,
2273 enum target_object object,
2274 const char *annex, const gdb_byte *buf,
2275 ULONGEST offset, LONGEST len)
2276 {
2277 return target_write_with_progress (ops, object, annex, buf, offset, len,
2278 NULL, NULL);
2279 }
2280
2281 /* Helper for target_read_alloc and target_read_stralloc. See their comments
2282 for details. */
2283
2284 template <typename T>
2285 gdb::optional<gdb::def_vector<T>>
2286 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2287 const char *annex)
2288 {
2289 gdb::def_vector<T> buf;
2290 size_t buf_pos = 0;
2291 const int chunk = 4096;
2292
2293 /* This function does not have a length parameter; it reads the
2294 entire OBJECT. Also, it doesn't support objects fetched partly
2295 from one target and partly from another (in a different stratum,
2296 e.g. a core file and an executable). Both reasons make it
2297 unsuitable for reading memory. */
2298 gdb_assert (object != TARGET_OBJECT_MEMORY);
2299
2300 /* Start by reading up to 4K at a time. The target will throttle
2301 this number down if necessary. */
2302 while (1)
2303 {
2304 ULONGEST xfered_len;
2305 enum target_xfer_status status;
2306
2307 buf.resize (buf_pos + chunk);
2308
2309 status = target_read_partial (ops, object, annex,
2310 (gdb_byte *) &buf[buf_pos],
2311 buf_pos, chunk,
2312 &xfered_len);
2313
2314 if (status == TARGET_XFER_EOF)
2315 {
2316 /* Read all there was. */
2317 buf.resize (buf_pos);
2318 return buf;
2319 }
2320 else if (status != TARGET_XFER_OK)
2321 {
2322 /* An error occurred. */
2323 return {};
2324 }
2325
2326 buf_pos += xfered_len;
2327
2328 QUIT;
2329 }
2330 }
2331
2332 /* See target.h. */
2333
2334 gdb::optional<gdb::byte_vector>
2335 target_read_alloc (struct target_ops *ops, enum target_object object,
2336 const char *annex)
2337 {
2338 return target_read_alloc_1<gdb_byte> (ops, object, annex);
2339 }
2340
2341 /* See target.h. */
2342
2343 gdb::optional<gdb::char_vector>
2344 target_read_stralloc (struct target_ops *ops, enum target_object object,
2345 const char *annex)
2346 {
2347 gdb::optional<gdb::char_vector> buf
2348 = target_read_alloc_1<char> (ops, object, annex);
2349
2350 if (!buf)
2351 return {};
2352
2353 if (buf->empty () || buf->back () != '\0')
2354 buf->push_back ('\0');
2355
2356 /* Check for embedded NUL bytes; but allow trailing NULs. */
2357 for (auto it = std::find (buf->begin (), buf->end (), '\0');
2358 it != buf->end (); it++)
2359 if (*it != '\0')
2360 {
2361 warning (_("target object %d, annex %s, "
2362 "contained unexpected null characters"),
2363 (int) object, annex ? annex : "(none)");
2364 break;
2365 }
2366
2367 return buf;
2368 }
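/* A usage sketch for the helper above (hypothetical caller; object and
   annex chosen for illustration only):

     gdb::optional<gdb::char_vector> text
       = target_read_stralloc (current_inferior ()->top_target (),
                               TARGET_OBJECT_OSDATA, "processes");
     if (text)
       puts_filtered (text->data ());

   When a buffer is returned, it is always NUL-terminated. */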
2369
2370 /* Memory transfer methods. */
2371
2372 void
2373 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2374 LONGEST len)
2375 {
2376 /* This method is used to read from an alternate, non-current
2377 target. This read must bypass the overlay support (as symbols
2378 don't match this target), and GDB's internal cache (wrong cache
2379 for this target). */
2380 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2381 != len)
2382 memory_error (TARGET_XFER_E_IO, addr);
2383 }
2384
2385 ULONGEST
2386 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2387 int len, enum bfd_endian byte_order)
2388 {
2389 gdb_byte buf[sizeof (ULONGEST)];
2390
2391 gdb_assert (len <= sizeof (buf));
2392 get_target_memory (ops, addr, buf, len);
2393 return extract_unsigned_integer (buf, len, byte_order);
2394 }
2395
2396 /* See target.h. */
2397
2398 int
2399 target_insert_breakpoint (struct gdbarch *gdbarch,
2400 struct bp_target_info *bp_tgt)
2401 {
2402 if (!may_insert_breakpoints)
2403 {
2404 warning (_("May not insert breakpoints"));
2405 return 1;
2406 }
2407
2408 target_ops *target = current_inferior ()->top_target ();
2409
2410 return target->insert_breakpoint (gdbarch, bp_tgt);
2411 }
2412
2413 /* See target.h. */
2414
2415 int
2416 target_remove_breakpoint (struct gdbarch *gdbarch,
2417 struct bp_target_info *bp_tgt,
2418 enum remove_bp_reason reason)
2419 {
2420 /* This is kind of a weird case to handle, but the permission might
2421 have been changed after breakpoints were inserted - in which case
2422 we should just take the user literally and assume that any
2423 breakpoints should be left in place. */
2424 if (!may_insert_breakpoints)
2425 {
2426 warning (_("May not remove breakpoints"));
2427 return 1;
2428 }
2429
2430 target_ops *target = current_inferior ()->top_target ();
2431
2432 return target->remove_breakpoint (gdbarch, bp_tgt, reason);
2433 }
2434
2435 static void
2436 info_target_command (const char *args, int from_tty)
2437 {
2438 int has_all_mem = 0;
2439
2440 if (current_program_space->symfile_object_file != NULL)
2441 {
2442 objfile *objf = current_program_space->symfile_object_file;
2443 printf_unfiltered (_("Symbols from \"%s\".\n"),
2444 objfile_name (objf));
2445 }
2446
2447 for (target_ops *t = current_inferior ()->top_target ();
2448 t != NULL;
2449 t = t->beneath ())
2450 {
2451 if (!t->has_memory ())
2452 continue;
2453
2454 if ((int) (t->stratum ()) <= (int) dummy_stratum)
2455 continue;
2456 if (has_all_mem)
2457 printf_unfiltered (_("\tWhile running this, "
2458 "GDB does not access memory from...\n"));
2459 printf_unfiltered ("%s:\n", t->longname ());
2460 t->files_info ();
2461 has_all_mem = t->has_all_memory ();
2462 }
2463 }
2464
2465 /* This function is called before any new inferior is created, e.g.
2466 by running a program, attaching, or connecting to a target.
2467 It cleans up any state from previous invocations which might
2468 change between runs. This is a subset of what target_preopen
2469 resets (things which might change between targets). */
2470
2471 void
2472 target_pre_inferior (int from_tty)
2473 {
2474 /* Clear out solib state. Otherwise the solib state of the previous
2475 inferior might have survived and is entirely wrong for the new
2476 target. This has been observed on GNU/Linux using glibc 2.3. How
2477 to reproduce:
2478
2479 bash$ ./foo&
2480 [1] 4711
2481 bash$ ./foo&
2482 [1] 4712
2483 bash$ gdb ./foo
2484 [...]
2485 (gdb) attach 4711
2486 (gdb) detach
2487 (gdb) attach 4712
2488 Cannot access memory at address 0xdeadbeef
2489 */
2490
2491 /* In some OSs, the shared library list is the same/global/shared
2492 across inferiors. If code is shared between processes, so are
2493 memory regions and features. */
2494 if (!gdbarch_has_global_solist (target_gdbarch ()))
2495 {
2496 no_shared_libraries (NULL, from_tty);
2497
2498 invalidate_target_mem_regions ();
2499
2500 target_clear_description ();
2501 }
2502
2503 /* attach_flag may be set if the previous process associated with
2504 the inferior was attached to. */
2505 current_inferior ()->attach_flag = 0;
2506
2507 current_inferior ()->highest_thread_num = 0;
2508
2509 agent_capability_invalidate ();
2510 }
2511
2512 /* This is to be called by the open routine before it does
2513 anything. */
2514
2515 void
2516 target_preopen (int from_tty)
2517 {
2518 dont_repeat ();
2519
2520 if (current_inferior ()->pid != 0)
2521 {
2522 if (!from_tty
2523 || !target_has_execution ()
2524 || query (_("A program is being debugged already. Kill it? ")))
2525 {
2526 /* Core inferiors actually should be detached, not
2527 killed. */
2528 if (target_has_execution ())
2529 target_kill ();
2530 else
2531 target_detach (current_inferior (), 0);
2532 }
2533 else
2534 error (_("Program not killed."));
2535 }
2536
2537 /* Calling target_kill may remove the target from the stack. But if
2538 it doesn't (which seems like a win for UDI), remove it now. */
2539 /* Leave the exec target, though. The user may be switching from a
2540 live process to a core of the same program. */
2541 pop_all_targets_above (file_stratum);
2542
2543 target_pre_inferior (from_tty);
2544 }
2545
2546 /* See target.h. */
2547
2548 void
2549 target_detach (inferior *inf, int from_tty)
2550 {
2551 /* After we have detached, we will clear the register cache for this inferior
2552 by calling registers_changed_ptid. We must save the pid_ptid before
2553 detaching, as the target detach method will clear inf->pid. */
2554 ptid_t save_pid_ptid = ptid_t (inf->pid);
2555
2556 /* As long as some to_detach implementations rely on the current inferior
2557 (either directly, or indirectly, e.g. through target_gdbarch or by
2558 reading memory), INF needs to be the current inferior. Once that
2559 requirement no longer holds, we can remove this
2560 assertion. */
2561 gdb_assert (inf == current_inferior ());
2562
2563 prepare_for_detach ();
2564
2565 /* Hold a strong reference because detaching may unpush the
2566 target. */
2567 auto proc_target_ref = target_ops_ref::new_reference (inf->process_target ());
2568
2569 current_inferior ()->top_target ()->detach (inf, from_tty);
2570
2571 process_stratum_target *proc_target
2572 = as_process_stratum_target (proc_target_ref.get ());
2573
2574 registers_changed_ptid (proc_target, save_pid_ptid);
2575
2576 /* We have to ensure we have no frame cache left. Normally,
2577 registers_changed_ptid (save_pid_ptid) calls reinit_frame_cache when
2578 inferior_ptid matches save_pid_ptid, but in our case, it does not
2579 call it, as inferior_ptid has been reset. */
2580 reinit_frame_cache ();
2581 }
2582
2583 void
2584 target_disconnect (const char *args, int from_tty)
2585 {
2586 /* If we're in breakpoints-always-inserted mode or if breakpoints
2587 are global across processes, we have to remove them before
2588 disconnecting. */
2589 remove_breakpoints ();
2590
2591 current_inferior ()->top_target ()->disconnect (args, from_tty);
2592 }
2593
2594 /* See target/target.h. */
2595
2596 ptid_t
2597 target_wait (ptid_t ptid, struct target_waitstatus *status,
2598 target_wait_flags options)
2599 {
2600 target_ops *target = current_inferior ()->top_target ();
2601 process_stratum_target *proc_target = current_inferior ()->process_target ();
2602
2603 gdb_assert (!proc_target->commit_resumed_state);
2604
2605 if (!target->can_async_p ())
2606 gdb_assert ((options & TARGET_WNOHANG) == 0);
2607
2608 return target->wait (ptid, status, options);
2609 }
2610
2611 /* See target.h. */
2612
2613 ptid_t
2614 default_target_wait (struct target_ops *ops,
2615 ptid_t ptid, struct target_waitstatus *status,
2616 target_wait_flags options)
2617 {
2618 status->kind = TARGET_WAITKIND_IGNORE;
2619 return minus_one_ptid;
2620 }
2621
2622 std::string
2623 target_pid_to_str (ptid_t ptid)
2624 {
2625 return current_inferior ()->top_target ()->pid_to_str (ptid);
2626 }
2627
2628 const char *
2629 target_thread_name (struct thread_info *info)
2630 {
2631 gdb_assert (info->inf == current_inferior ());
2632
2633 return current_inferior ()->top_target ()->thread_name (info);
2634 }
2635
2636 struct thread_info *
2637 target_thread_handle_to_thread_info (const gdb_byte *thread_handle,
2638 int handle_len,
2639 struct inferior *inf)
2640 {
2641 target_ops *target = current_inferior ()->top_target ();
2642
2643 return target->thread_handle_to_thread_info (thread_handle, handle_len, inf);
2644 }
2645
2646 /* See target.h. */
2647
2648 gdb::byte_vector
2649 target_thread_info_to_thread_handle (struct thread_info *tip)
2650 {
2651 target_ops *target = current_inferior ()->top_target ();
2652
2653 return target->thread_info_to_thread_handle (tip);
2654 }
2655
2656 void
2657 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2658 {
2659 process_stratum_target *curr_target = current_inferior ()->process_target ();
2660 gdb_assert (!curr_target->commit_resumed_state);
2661
2662 target_dcache_invalidate ();
2663
2664 current_inferior ()->top_target ()->resume (ptid, step, signal);
2665
2666 registers_changed_ptid (curr_target, ptid);
2667 /* We only set the internal executing state here. The user/frontend
2668 running state is set at a higher level. This also clears the
2669 thread's stop_pc as side effect. */
2670 set_executing (curr_target, ptid, true);
2671 clear_inline_frame_state (curr_target, ptid);
2672 }
2673
2674 /* See target.h. */
2675
2676 void
2677 target_commit_resumed ()
2678 {
2679 gdb_assert (current_inferior ()->process_target ()->commit_resumed_state);
2680 current_inferior ()->top_target ()->commit_resumed ();
2681 }
2682
2683 /* See target.h. */
2684
2685 bool
2686 target_has_pending_events ()
2687 {
2688 return current_inferior ()->top_target ()->has_pending_events ();
2689 }
2690
2691 void
2692 target_pass_signals (gdb::array_view<const unsigned char> pass_signals)
2693 {
2694 current_inferior ()->top_target ()->pass_signals (pass_signals);
2695 }
2696
2697 void
2698 target_program_signals (gdb::array_view<const unsigned char> program_signals)
2699 {
2700 current_inferior ()->top_target ()->program_signals (program_signals);
2701 }
2702
2703 static void
2704 default_follow_fork (struct target_ops *self, ptid_t child_ptid,
2705 target_waitkind fork_kind, bool follow_child,
2706 bool detach_fork)
2707 {
2708 /* Some target returned a fork event, but did not know how to follow it. */
2709 internal_error (__FILE__, __LINE__,
2710 _("could not find a target to follow fork"));
2711 }
2712
2713 /* See target.h. */
2714
2715 void
2716 target_follow_fork (ptid_t child_ptid, target_waitkind fork_kind,
2717 bool follow_child, bool detach_fork)
2718 {
2719 target_ops *target = current_inferior ()->top_target ();
2720
2721 return target->follow_fork (child_ptid, fork_kind, follow_child, detach_fork);
2722 }
2723
2724 /* See target.h. */
2725
2726 void
2727 target_follow_exec (inferior *follow_inf, ptid_t ptid,
2728 const char *execd_pathname)
2729 {
2730 current_inferior ()->top_target ()->follow_exec (follow_inf, ptid,
2731 execd_pathname);
2732 }
2733
2734 static void
2735 default_mourn_inferior (struct target_ops *self)
2736 {
2737 internal_error (__FILE__, __LINE__,
2738 _("could not find a target to follow mourn inferior"));
2739 }
2740
2741 void
2742 target_mourn_inferior (ptid_t ptid)
2743 {
2744 gdb_assert (ptid.pid () == inferior_ptid.pid ());
2745 current_inferior ()->top_target ()->mourn_inferior ();
2746
2747 /* We no longer need to keep handles on any of the object files.
2748 Make sure to release them to avoid unnecessarily locking any
2749 of them while we're not actually debugging. */
2750 bfd_cache_close_all ();
2751 }
2752
2753 /* Look for a target which can describe architectural features, starting
2754 from TARGET. If we find one, return its description. */
2755
2756 const struct target_desc *
2757 target_read_description (struct target_ops *target)
2758 {
2759 return target->read_description ();
2760 }
2761
2762
2763 /* Default implementation of memory-searching. */
2764
2765 static int
2766 default_search_memory (struct target_ops *self,
2767 CORE_ADDR start_addr, ULONGEST search_space_len,
2768 const gdb_byte *pattern, ULONGEST pattern_len,
2769 CORE_ADDR *found_addrp)
2770 {
2771 auto read_memory = [=] (CORE_ADDR addr, gdb_byte *result, size_t len)
2772 {
2773 return target_read (current_inferior ()->top_target (),
2774 TARGET_OBJECT_MEMORY, NULL,
2775 result, addr, len) == len;
2776 };
2777
2778 /* Start over from the top of the target stack. */
2779 return simple_search_memory (read_memory, start_addr, search_space_len,
2780 pattern, pattern_len, found_addrp);
2781 }
2782
2783 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2784 sequence of bytes in PATTERN with length PATTERN_LEN.
2785
2786 The result is 1 if found, 0 if not found, and -1 if there was an error
2787 requiring halting of the search (e.g. memory read error).
2788 If the pattern is found the address is recorded in FOUND_ADDRP. */
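/* A usage sketch (hypothetical pattern; START is assumed to be a valid
   address in the inferior):

     static const gdb_byte needle[] = { 0xde, 0xad, 0xbe, 0xef };
     CORE_ADDR hit;
     if (target_search_memory (start, 0x10000, needle, sizeof (needle),
                               &hit) == 1)
       printf_filtered ("pattern found at %s\n",
                        paddress (target_gdbarch (), hit));
*/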
2789
2790 int
2791 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2792 const gdb_byte *pattern, ULONGEST pattern_len,
2793 CORE_ADDR *found_addrp)
2794 {
2795 target_ops *target = current_inferior ()->top_target ();
2796
2797 return target->search_memory (start_addr, search_space_len, pattern,
2798 pattern_len, found_addrp);
2799 }
2800
2801 /* Look through the currently pushed targets. If none of them will
2802 be able to restart the currently running process, issue an error
2803 message. */
2804
2805 void
2806 target_require_runnable (void)
2807 {
2808 for (target_ops *t = current_inferior ()->top_target ();
2809 t != NULL;
2810 t = t->beneath ())
2811 {
2812 /* If this target knows how to create a new program, then
2813 assume we will still be able to after killing the current
2814 one. Either killing and mourning will not pop T, or else
2815 find_default_run_target will find it again. */
2816 if (t->can_create_inferior ())
2817 return;
2818
2819 /* Do not worry about targets at certain strata that cannot
2820 create inferiors. Assume they will be pushed again if
2821 necessary, and continue to the process_stratum. */
2822 if (t->stratum () > process_stratum)
2823 continue;
2824
2825 error (_("The \"%s\" target does not support \"run\". "
2826 "Try \"help target\" or \"continue\"."),
2827 t->shortname ());
2828 }
2829
2830 /* This function is only called if the target is running. In that
2831 case there should have been a process_stratum target and it
2832 should either know how to create inferiors, or not... */
2833 internal_error (__FILE__, __LINE__, _("No targets found"));
2834 }
2835
2836 /* Whether GDB is allowed to fall back to the default run target for
2837 "run", "attach", etc. when no target is connected yet. */
2838 static bool auto_connect_native_target = true;
2839
2840 static void
2841 show_auto_connect_native_target (struct ui_file *file, int from_tty,
2842 struct cmd_list_element *c, const char *value)
2843 {
2844 fprintf_filtered (file,
2845 _("Whether GDB may automatically connect to the "
2846 "native target is %s.\n"),
2847 value);
2848 }
2849
2850 /* A pointer to the target that can respond to "run" or "attach".
2851 Native targets are always singletons and instantiated early at GDB
2852 startup. */
2853 static target_ops *the_native_target;
2854
2855 /* See target.h. */
2856
2857 void
2858 set_native_target (target_ops *target)
2859 {
2860 if (the_native_target != NULL)
2861 internal_error (__FILE__, __LINE__,
2862 _("native target already set (\"%s\")."),
2863 the_native_target->longname ());
2864
2865 the_native_target = target;
2866 }
2867
2868 /* See target.h. */
2869
2870 target_ops *
2871 get_native_target ()
2872 {
2873 return the_native_target;
2874 }
2875
2876 /* Look through the list of possible targets for a target that can
2877 execute a run or attach command without any other data. This is
2878 used to locate the default process stratum.
2879
2880 If DO_MESG is not NULL, the result is always valid (error() is
2881 called for errors); else, return NULL on error. */
2882
2883 static struct target_ops *
2884 find_default_run_target (const char *do_mesg)
2885 {
2886 if (auto_connect_native_target && the_native_target != NULL)
2887 return the_native_target;
2888
2889 if (do_mesg != NULL)
2890 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2891 return NULL;
2892 }
2893
2894 /* See target.h. */
2895
2896 struct target_ops *
2897 find_attach_target (void)
2898 {
2899 /* If a target on the current stack can attach, use it. */
2900 for (target_ops *t = current_inferior ()->top_target ();
2901 t != NULL;
2902 t = t->beneath ())
2903 {
2904 if (t->can_attach ())
2905 return t;
2906 }
2907
2908 /* Otherwise, use the default run target for attaching. */
2909 return find_default_run_target ("attach");
2910 }
2911
2912 /* See target.h. */
2913
2914 struct target_ops *
2915 find_run_target (void)
2916 {
2917 /* If a target on the current stack can run, use it. */
2918 for (target_ops *t = current_inferior ()->top_target ();
2919 t != NULL;
2920 t = t->beneath ())
2921 {
2922 if (t->can_create_inferior ())
2923 return t;
2924 }
2925
2926 /* Otherwise, use the default run target. */
2927 return find_default_run_target ("run");
2928 }
2929
2930 bool
2931 target_ops::info_proc (const char *args, enum info_proc_what what)
2932 {
2933 return false;
2934 }
2935
2936 /* Implement the "info proc" command. */
2937
2938 int
2939 target_info_proc (const char *args, enum info_proc_what what)
2940 {
2941 struct target_ops *t;
2942
2943 /* If we're already connected to something that can get us OS
2944 related data, use it. Otherwise, try using the native
2945 target. */
2946 t = find_target_at (process_stratum);
2947 if (t == NULL)
2948 t = find_default_run_target (NULL);
2949
2950 for (; t != NULL; t = t->beneath ())
2951 {
2952 if (t->info_proc (args, what))
2953 {
2954 if (targetdebug)
2955 fprintf_unfiltered (gdb_stdlog,
2956 "target_info_proc (\"%s\", %d)\n", args, what);
2957
2958 return 1;
2959 }
2960 }
2961
2962 return 0;
2963 }
2964
2965 static int
2966 find_default_supports_disable_randomization (struct target_ops *self)
2967 {
2968 struct target_ops *t;
2969
2970 t = find_default_run_target (NULL);
2971 if (t != NULL)
2972 return t->supports_disable_randomization ();
2973 return 0;
2974 }
2975
2976 int
2977 target_supports_disable_randomization (void)
2978 {
2979 return current_inferior ()->top_target ()->supports_disable_randomization ();
2980 }
2981
2982 /* See target/target.h. */
2983
2984 int
2985 target_supports_multi_process (void)
2986 {
2987 return current_inferior ()->top_target ()->supports_multi_process ();
2988 }
2989
2990 /* See target.h. */
2991
2992 gdb::optional<gdb::char_vector>
2993 target_get_osdata (const char *type)
2994 {
2995 struct target_ops *t;
2996
2997 /* If we're already connected to something that can get us OS
2998 related data, use it. Otherwise, try using the native
2999 target. */
3000 t = find_target_at (process_stratum);
3001 if (t == NULL)
3002 t = find_default_run_target ("get OS data");
3003
3004 if (!t)
3005 return {};
3006
3007 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3008 }
3009
3010 /* Determine the current address space of thread PTID. */
3011
3012 struct address_space *
3013 target_thread_address_space (ptid_t ptid)
3014 {
3015 struct address_space *aspace;
3016
3017 aspace = current_inferior ()->top_target ()->thread_address_space (ptid);
3018 gdb_assert (aspace != NULL);
3019
3020 return aspace;
3021 }
3022
3023 /* See target.h. */
3024
3025 target_ops *
3026 target_ops::beneath () const
3027 {
3028 return current_inferior ()->find_target_beneath (this);
3029 }
3030
3031 void
3032 target_ops::close ()
3033 {
3034 }
3035
3036 bool
3037 target_ops::can_attach ()
3038 {
3039 return 0;
3040 }
3041
3042 void
3043 target_ops::attach (const char *, int)
3044 {
3045 gdb_assert_not_reached ("target_ops::attach called");
3046 }
3047
3048 bool
3049 target_ops::can_create_inferior ()
3050 {
3051 return 0;
3052 }
3053
3054 void
3055 target_ops::create_inferior (const char *, const std::string &,
3056 char **, int)
3057 {
3058 gdb_assert_not_reached ("target_ops::create_inferior called");
3059 }
3060
3061 bool
3062 target_ops::can_run ()
3063 {
3064 return false;
3065 }
3066
3067 int
3068 target_can_run ()
3069 {
3070 for (target_ops *t = current_inferior ()->top_target ();
3071 t != NULL;
3072 t = t->beneath ())
3073 {
3074 if (t->can_run ())
3075 return 1;
3076 }
3077
3078 return 0;
3079 }
3080
3081 /* Target file operations. */
3082
3083 static struct target_ops *
3084 default_fileio_target (void)
3085 {
3086 struct target_ops *t;
3087
3088 /* If we're already connected to something that can perform
3089 file I/O, use it. Otherwise, try using the native target. */
3090 t = find_target_at (process_stratum);
3091 if (t != NULL)
3092 return t;
3093 return find_default_run_target ("file I/O");
3094 }
3095
3096 /* File handle for target file operations. */
3097
3098 struct fileio_fh_t
3099 {
3100 /* The target on which this file is open. NULL if the target was
3101 closed while the handle was still open. */
3102 target_ops *target;
3103
3104 /* The file descriptor on the target. */
3105 int target_fd;
3106
3107 /* Check whether this fileio_fh_t represents a closed file. */
3108 bool is_closed ()
3109 {
3110 return target_fd < 0;
3111 }
3112 };
3113
3114 /* Vector of currently open file handles. The value returned by
3115 target_fileio_open and passed as the FD argument to other
3116 target_fileio_* functions is an index into this vector. This
3117 vector's entries are never freed; instead, files are marked as
3118 closed, and the handle becomes available for reuse. */
3119 static std::vector<fileio_fh_t> fileio_fhandles;
3120
3121 /* Index into fileio_fhandles of the lowest handle that might be
3122 closed. This permits handle reuse without searching the whole
3123 list each time a new file is opened. */
3124 static int lowest_closed_fd;
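/* For example (hypothetical sequence): opening two files yields
   handles 0 and 1 and leaves lowest_closed_fd at 2; closing handle 0
   marks slot 0 closed and lowers lowest_closed_fd back to 0, so the
   next target_fileio_open reuses slot 0 instead of growing the
   vector. */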
3125
3126 /* See target.h. */
3127
3128 void
3129 fileio_handles_invalidate_target (target_ops *targ)
3130 {
3131 for (fileio_fh_t &fh : fileio_fhandles)
3132 if (fh.target == targ)
3133 fh.target = NULL;
3134 }
3135
3136 /* Acquire a target fileio file descriptor. */
3137
3138 static int
3139 acquire_fileio_fd (target_ops *target, int target_fd)
3140 {
3141 /* Search for closed handles to reuse. */
3142 for (; lowest_closed_fd < fileio_fhandles.size (); lowest_closed_fd++)
3143 {
3144 fileio_fh_t &fh = fileio_fhandles[lowest_closed_fd];
3145
3146 if (fh.is_closed ())
3147 break;
3148 }
3149
3150 /* Push a new handle if no closed handles were found. */
3151 if (lowest_closed_fd == fileio_fhandles.size ())
3152 fileio_fhandles.push_back (fileio_fh_t {target, target_fd});
3153 else
3154 fileio_fhandles[lowest_closed_fd] = {target, target_fd};
3155
3156 /* Should no longer be marked closed. */
3157 gdb_assert (!fileio_fhandles[lowest_closed_fd].is_closed ());
3158
3159 /* Return its index, and start the next lookup at
3160 the next index. */
3161 return lowest_closed_fd++;
3162 }
3163
3164 /* Release a target fileio file descriptor. */
3165
3166 static void
3167 release_fileio_fd (int fd, fileio_fh_t *fh)
3168 {
3169 fh->target_fd = -1;
3170 lowest_closed_fd = std::min (lowest_closed_fd, fd);
3171 }
3172
3173 /* Return a pointer to the fileio_fh_t corresponding to FD. */
3174
3175 static fileio_fh_t *
3176 fileio_fd_to_fh (int fd)
3177 {
3178 return &fileio_fhandles[fd];
3179 }
3180
3181
3182 /* Default implementations of file i/o methods. We don't want these
3183 to delegate automatically, because we need to know which target
3184 supported the method, in order to call it directly from within
3185 pread/pwrite, etc. */
3186
3187 int
3188 target_ops::fileio_open (struct inferior *inf, const char *filename,
3189 int flags, int mode, int warn_if_slow,
3190 int *target_errno)
3191 {
3192 *target_errno = FILEIO_ENOSYS;
3193 return -1;
3194 }
3195
3196 int
3197 target_ops::fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3198 ULONGEST offset, int *target_errno)
3199 {
3200 *target_errno = FILEIO_ENOSYS;
3201 return -1;
3202 }
3203
3204 int
3205 target_ops::fileio_pread (int fd, gdb_byte *read_buf, int len,
3206 ULONGEST offset, int *target_errno)
3207 {
3208 *target_errno = FILEIO_ENOSYS;
3209 return -1;
3210 }
3211
3212 int
3213 target_ops::fileio_fstat (int fd, struct stat *sb, int *target_errno)
3214 {
3215 *target_errno = FILEIO_ENOSYS;
3216 return -1;
3217 }
3218
3219 int
3220 target_ops::fileio_close (int fd, int *target_errno)
3221 {
3222 *target_errno = FILEIO_ENOSYS;
3223 return -1;
3224 }
3225
3226 int
3227 target_ops::fileio_unlink (struct inferior *inf, const char *filename,
3228 int *target_errno)
3229 {
3230 *target_errno = FILEIO_ENOSYS;
3231 return -1;
3232 }
3233
3234 gdb::optional<std::string>
3235 target_ops::fileio_readlink (struct inferior *inf, const char *filename,
3236 int *target_errno)
3237 {
3238 *target_errno = FILEIO_ENOSYS;
3239 return {};
3240 }
3241
3242 /* See target.h. */
3243
3244 int
3245 target_fileio_open (struct inferior *inf, const char *filename,
3246 int flags, int mode, bool warn_if_slow, int *target_errno)
3247 {
3248 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3249 {
3250 int fd = t->fileio_open (inf, filename, flags, mode,
3251 warn_if_slow, target_errno);
3252
3253 if (fd == -1 && *target_errno == FILEIO_ENOSYS)
3254 continue;
3255
3256 if (fd < 0)
3257 fd = -1;
3258 else
3259 fd = acquire_fileio_fd (t, fd);
3260
3261 if (targetdebug)
3262 fprintf_unfiltered (gdb_stdlog,
3263 "target_fileio_open (%d,%s,0x%x,0%o,%d)"
3264 " = %d (%d)\n",
3265 inf == NULL ? 0 : inf->num,
3266 filename, flags, mode,
3267 warn_if_slow, fd,
3268 fd != -1 ? 0 : *target_errno);
3269 return fd;
3270 }
3271
3272 *target_errno = FILEIO_ENOSYS;
3273 return -1;
3274 }
3275
3276 /* See target.h. */
3277
3278 int
3279 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3280 ULONGEST offset, int *target_errno)
3281 {
3282 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3283 int ret = -1;
3284
3285 if (fh->is_closed ())
3286 *target_errno = EBADF;
3287 else if (fh->target == NULL)
3288 *target_errno = EIO;
3289 else
3290 ret = fh->target->fileio_pwrite (fh->target_fd, write_buf,
3291 len, offset, target_errno);
3292
3293 if (targetdebug)
3294 fprintf_unfiltered (gdb_stdlog,
3295 "target_fileio_pwrite (%d,...,%d,%s) "
3296 "= %d (%d)\n",
3297 fd, len, pulongest (offset),
3298 ret, ret != -1 ? 0 : *target_errno);
3299 return ret;
3300 }
3301
3302 /* See target.h. */
3303
3304 int
3305 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3306 ULONGEST offset, int *target_errno)
3307 {
3308 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3309 int ret = -1;
3310
3311 if (fh->is_closed ())
3312 *target_errno = EBADF;
3313 else if (fh->target == NULL)
3314 *target_errno = EIO;
3315 else
3316 ret = fh->target->fileio_pread (fh->target_fd, read_buf,
3317 len, offset, target_errno);
3318
3319 if (targetdebug)
3320 fprintf_unfiltered (gdb_stdlog,
3321 "target_fileio_pread (%d,...,%d,%s) "
3322 "= %d (%d)\n",
3323 fd, len, pulongest (offset),
3324 ret, ret != -1 ? 0 : *target_errno);
3325 return ret;
3326 }
3327
3328 /* See target.h. */
3329
3330 int
3331 target_fileio_fstat (int fd, struct stat *sb, int *target_errno)
3332 {
3333 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3334 int ret = -1;
3335
3336 if (fh->is_closed ())
3337 *target_errno = EBADF;
3338 else if (fh->target == NULL)
3339 *target_errno = EIO;
3340 else
3341 ret = fh->target->fileio_fstat (fh->target_fd, sb, target_errno);
3342
3343 if (targetdebug)
3344 fprintf_unfiltered (gdb_stdlog,
3345 "target_fileio_fstat (%d) = %d (%d)\n",
3346 fd, ret, ret != -1 ? 0 : *target_errno);
3347 return ret;
3348 }
3349
3350 /* See target.h. */
3351
3352 int
3353 target_fileio_close (int fd, int *target_errno)
3354 {
3355 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3356 int ret = -1;
3357
3358 if (fh->is_closed ())
3359 *target_errno = EBADF;
3360 else
3361 {
3362 if (fh->target != NULL)
3363 ret = fh->target->fileio_close (fh->target_fd,
3364 target_errno);
3365 else
3366 ret = 0;
3367 release_fileio_fd (fd, fh);
3368 }
3369
3370 if (targetdebug)
3371 fprintf_unfiltered (gdb_stdlog,
3372 "target_fileio_close (%d) = %d (%d)\n",
3373 fd, ret, ret != -1 ? 0 : *target_errno);
3374 return ret;
3375 }
3376
3377 /* See target.h. */
3378
3379 int
3380 target_fileio_unlink (struct inferior *inf, const char *filename,
3381 int *target_errno)
3382 {
3383 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3384 {
3385 int ret = t->fileio_unlink (inf, filename, target_errno);
3386
3387 if (ret == -1 && *target_errno == FILEIO_ENOSYS)
3388 continue;
3389
3390 if (targetdebug)
3391 fprintf_unfiltered (gdb_stdlog,
3392 "target_fileio_unlink (%d,%s)"
3393 " = %d (%d)\n",
3394 inf == NULL ? 0 : inf->num, filename,
3395 ret, ret != -1 ? 0 : *target_errno);
3396 return ret;
3397 }
3398
3399 *target_errno = FILEIO_ENOSYS;
3400 return -1;
3401 }
3402
3403 /* See target.h. */
3404
3405 gdb::optional<std::string>
3406 target_fileio_readlink (struct inferior *inf, const char *filename,
3407 int *target_errno)
3408 {
3409 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3410 {
3411 gdb::optional<std::string> ret
3412 = t->fileio_readlink (inf, filename, target_errno);
3413
3414 if (!ret.has_value () && *target_errno == FILEIO_ENOSYS)
3415 continue;
3416
3417 if (targetdebug)
3418 fprintf_unfiltered (gdb_stdlog,
3419 "target_fileio_readlink (%d,%s)"
3420 " = %s (%d)\n",
3421 inf == NULL ? 0 : inf->num,
3422 filename, ret ? ret->c_str () : "(nil)",
3423 ret ? 0 : *target_errno);
3424 return ret;
3425 }
3426
3427 *target_errno = FILEIO_ENOSYS;
3428 return {};
3429 }
3430
3431 /* Like scoped_fd, but specific to target fileio. */
3432
3433 class scoped_target_fd
3434 {
3435 public:
3436 explicit scoped_target_fd (int fd) noexcept
3437 : m_fd (fd)
3438 {
3439 }
3440
3441 ~scoped_target_fd ()
3442 {
3443 if (m_fd >= 0)
3444 {
3445 int target_errno;
3446
3447 target_fileio_close (m_fd, &target_errno);
3448 }
3449 }
3450
3451 DISABLE_COPY_AND_ASSIGN (scoped_target_fd);
3452
3453 int get () const noexcept
3454 {
3455 return m_fd;
3456 }
3457
3458 private:
3459 int m_fd;
3460 };
3461
3462 /* Read target file FILENAME, in the filesystem as seen by INF. If
3463 INF is NULL, use the filesystem seen by the debugger (GDB or, for
3464 remote targets, the remote stub). Store the result in *BUF_P and
3465 return the size of the transferred data. PADDING additional bytes
3466 are available in *BUF_P. This is a helper function for
3467 target_fileio_read_alloc; see the declaration of that function for
3468 more information. */
3469
3470 static LONGEST
3471 target_fileio_read_alloc_1 (struct inferior *inf, const char *filename,
3472 gdb_byte **buf_p, int padding)
3473 {
3474 size_t buf_alloc, buf_pos;
3475 gdb_byte *buf;
3476 LONGEST n;
3477 int target_errno;
3478
3479 scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
3480 0700, false, &target_errno));
3481 if (fd.get () == -1)
3482 return -1;
3483
3484 /* Start by reading up to 4K at a time. The target will throttle
3485 this number down if necessary. */
3486 buf_alloc = 4096;
3487 buf = (gdb_byte *) xmalloc (buf_alloc);
3488 buf_pos = 0;
3489 while (1)
3490 {
3491 n = target_fileio_pread (fd.get (), &buf[buf_pos],
3492 buf_alloc - buf_pos - padding, buf_pos,
3493 &target_errno);
3494 if (n < 0)
3495 {
3496 /* An error occurred. */
3497 xfree (buf);
3498 return -1;
3499 }
3500 else if (n == 0)
3501 {
3502 /* Read all there was. */
3503 if (buf_pos == 0)
3504 xfree (buf);
3505 else
3506 *buf_p = buf;
3507 return buf_pos;
3508 }
3509
3510 buf_pos += n;
3511
3512 /* If the buffer is filling up, expand it. */
3513 if (buf_alloc < buf_pos * 2)
3514 {
3515 buf_alloc *= 2;
3516 buf = (gdb_byte *) xrealloc (buf, buf_alloc);
3517 }
3518
3519 QUIT;
3520 }
3521 }
3522
3523 /* See target.h. */
3524
3525 LONGEST
3526 target_fileio_read_alloc (struct inferior *inf, const char *filename,
3527 gdb_byte **buf_p)
3528 {
3529 return target_fileio_read_alloc_1 (inf, filename, buf_p, 0);
3530 }
3531
3532 /* See target.h. */
3533
3534 gdb::unique_xmalloc_ptr<char>
3535 target_fileio_read_stralloc (struct inferior *inf, const char *filename)
3536 {
3537 gdb_byte *buffer;
3538 char *bufstr;
3539 LONGEST i, transferred;
3540
3541 transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1);
3542 bufstr = (char *) buffer;
3543
3544 if (transferred < 0)
3545 return gdb::unique_xmalloc_ptr<char> (nullptr);
3546
3547 if (transferred == 0)
3548 return make_unique_xstrdup ("");
3549
3550 bufstr[transferred] = 0;
3551
3552 /* Check for embedded NUL bytes; but allow trailing NULs. */
3553 for (i = strlen (bufstr); i < transferred; i++)
3554 if (bufstr[i] != 0)
3555 {
3556 warning (_("target file %s "
3557 "contained unexpected null characters"),
3558 filename);
3559 break;
3560 }
3561
3562 return gdb::unique_xmalloc_ptr<char> (bufstr);
3563 }
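/* A usage sketch (hypothetical path; real callers read files in the
   inferior's filesystem, e.g. under /proc):

     gdb::unique_xmalloc_ptr<char> contents
       = target_fileio_read_stralloc (current_inferior (),
                                      "/etc/hostname");
     if (contents != nullptr)
       printf_filtered ("%s", contents.get ());

   A NULL result means the read failed; an empty file yields "". */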
3564
3565
3566 static int
3567 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3568 CORE_ADDR addr, int len)
3569 {
3570 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3571 }
3572
3573 static int
3574 default_watchpoint_addr_within_range (struct target_ops *target,
3575 CORE_ADDR addr,
3576 CORE_ADDR start, int length)
3577 {
3578 return addr >= start && addr < start + length;
3579 }
3580
3581 /* See target.h. */
3582
3583 target_ops *
3584 target_stack::find_beneath (const target_ops *t) const
3585 {
3586 /* Look for a non-empty slot at stratum levels beneath T's. */
3587 for (int stratum = t->stratum () - 1; stratum >= 0; --stratum)
3588 if (m_stack[stratum] != NULL)
3589 return m_stack[stratum];
3590
3591 return NULL;
3592 }
3593
3594 /* See target.h. */
3595
3596 struct target_ops *
3597 find_target_at (enum strata stratum)
3598 {
3599 return current_inferior ()->target_at (stratum);
3600 }
3601
3602 \f
3603
3604 /* See target.h. */
3605
3606 void
3607 target_announce_detach (int from_tty)
3608 {
3609 pid_t pid;
3610 const char *exec_file;
3611
3612 if (!from_tty)
3613 return;
3614
3615 exec_file = get_exec_file (0);
3616 if (exec_file == NULL)
3617 exec_file = "";
3618
3619 pid = inferior_ptid.pid ();
3620 printf_unfiltered (_("Detaching from program: %s, %s\n"), exec_file,
3621 target_pid_to_str (ptid_t (pid)).c_str ());
3622 }
3623
3624 /* The inferior process has died. Long live the inferior! */
3625
3626 void
3627 generic_mourn_inferior (void)
3628 {
3629 inferior *inf = current_inferior ();
3630
3631 switch_to_no_thread ();
3632
3633 /* Mark breakpoints uninserted in case something tries to delete a
3634 breakpoint while we delete the inferior's threads (which would
3635 fail, since the inferior is long gone). */
3636 mark_breakpoints_out ();
3637
3638 if (inf->pid != 0)
3639 exit_inferior (inf);
3640
3641 /* Note this wipes step-resume breakpoints, so needs to be done
3642 after exit_inferior, which ends up referencing the step-resume
3643 breakpoints through clear_thread_inferior_resources. */
3644 breakpoint_init_inferior (inf_exited);
3645
3646 registers_changed ();
3647
3648 reopen_exec_file ();
3649 reinit_frame_cache ();
3650
3651 if (deprecated_detach_hook)
3652 deprecated_detach_hook ();
3653 }
3654 \f
3655 /* Convert a normal process ID to a string. The result is returned
3656 as a std::string. */
3657
3658 std::string
3659 normal_pid_to_str (ptid_t ptid)
3660 {
3661 return string_printf ("process %d", ptid.pid ());
3662 }
3663
3664 static std::string
3665 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
3666 {
3667 return normal_pid_to_str (ptid);
3668 }
3669
3670 /* Error-catcher for target_find_memory_regions. */
3671 static int
3672 dummy_find_memory_regions (struct target_ops *self,
3673 find_memory_region_ftype ignore1, void *ignore2)
3674 {
3675 error (_("Command not implemented for this target."));
3676 return 0;
3677 }
3678
3679 /* Error-catcher for target_make_corefile_notes. */
3680 static gdb::unique_xmalloc_ptr<char>
3681 dummy_make_corefile_notes (struct target_ops *self,
3682 bfd *ignore1, int *ignore2)
3683 {
3684 error (_("Command not implemented for this target."));
3685 return NULL;
3686 }
3687
3688 #include "target-delegates.c"
3689
3690 /* The initial current target, so that there is always a semi-valid
3691 current target. */
3692
3693 static dummy_target the_dummy_target;
3694
3695 /* See target.h. */
3696
3697 target_ops *
3698 get_dummy_target ()
3699 {
3700 return &the_dummy_target;
3701 }
3702
3703 static const target_info dummy_target_info = {
3704 "None",
3705 N_("None"),
3706 ""
3707 };
3708
3709 strata
3710 dummy_target::stratum () const
3711 {
3712 return dummy_stratum;
3713 }
3714
3715 strata
3716 debug_target::stratum () const
3717 {
3718 return debug_stratum;
3719 }
3720
3721 const target_info &
3722 dummy_target::info () const
3723 {
3724 return dummy_target_info;
3725 }
3726
3727 const target_info &
3728 debug_target::info () const
3729 {
3730 return beneath ()->info ();
3731 }
3732
3733 \f
3734
3735 void
3736 target_close (struct target_ops *targ)
3737 {
3738 for (inferior *inf : all_inferiors ())
3739 gdb_assert (!inf->target_is_pushed (targ));
3740
3741 fileio_handles_invalidate_target (targ);
3742
3743 targ->close ();
3744
3745 if (targetdebug)
3746 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3747 }
3748
3749 int
3750 target_thread_alive (ptid_t ptid)
3751 {
3752 return current_inferior ()->top_target ()->thread_alive (ptid);
3753 }
3754
3755 void
3756 target_update_thread_list (void)
3757 {
3758 current_inferior ()->top_target ()->update_thread_list ();
3759 }
3760
3761 void
3762 target_stop (ptid_t ptid)
3763 {
3764 process_stratum_target *proc_target = current_inferior ()->process_target ();
3765
3766 gdb_assert (!proc_target->commit_resumed_state);
3767
3768 if (!may_stop)
3769 {
3770 warning (_("May not interrupt or stop the target, ignoring attempt"));
3771 return;
3772 }
3773
3774 current_inferior ()->top_target ()->stop (ptid);
3775 }
3776
3777 void
3778 target_interrupt ()
3779 {
3780 if (!may_stop)
3781 {
3782 warning (_("May not interrupt or stop the target, ignoring attempt"));
3783 return;
3784 }
3785
3786 current_inferior ()->top_target ()->interrupt ();
3787 }
3788
3789 /* See target.h. */
3790
3791 void
3792 target_pass_ctrlc (void)
3793 {
3794 /* Pass the Ctrl-C to the first target that has a thread
3795 running. */
3796 for (inferior *inf : all_inferiors ())
3797 {
3798 target_ops *proc_target = inf->process_target ();
3799 if (proc_target == NULL)
3800 continue;
3801
3802 for (thread_info *thr : inf->non_exited_threads ())
3803 {
3804 /* A thread can be THREAD_STOPPED and executing, while
3805 running an infcall. */
3806 if (thr->state == THREAD_RUNNING || thr->executing)
3807 {
3808 /* We can get here quite deep in target layers. Avoid
3809 switching thread context or anything that would
3810 communicate with the target (e.g., to fetch
3811 registers), or flushing, e.g., the frame cache. We
3812 just switch inferior in order to be able to call
3813 through the target_stack. */
3814 scoped_restore_current_inferior restore_inferior;
3815 set_current_inferior (inf);
3816 current_inferior ()->top_target ()->pass_ctrlc ();
3817 return;
3818 }
3819 }
3820 }
3821 }
3822
3823 /* See target.h. */
3824
3825 void
3826 default_target_pass_ctrlc (struct target_ops *ops)
3827 {
3828 target_interrupt ();
3829 }
3830
3831 /* See target/target.h. */
3832
3833 void
3834 target_stop_and_wait (ptid_t ptid)
3835 {
3836 struct target_waitstatus status;
3837 bool was_non_stop = non_stop;
3838
3839 non_stop = true;
3840 target_stop (ptid);
3841
3842 memset (&status, 0, sizeof (status));
3843 target_wait (ptid, &status, 0);
3844
3845 non_stop = was_non_stop;
3846 }
3847
3848 /* See target/target.h. */
3849
3850 void
3851 target_continue_no_signal (ptid_t ptid)
3852 {
3853 target_resume (ptid, 0, GDB_SIGNAL_0);
3854 }
3855
3856 /* See target/target.h. */
3857
3858 void
3859 target_continue (ptid_t ptid, enum gdb_signal signal)
3860 {
3861 target_resume (ptid, 0, signal);
3862 }
3863
3864 /* Append ELEM to LIST, a comma-separated list. */
3865
3866 static void
3867 str_comma_list_concat_elem (std::string *list, const char *elem)
3868 {
3869 if (!list->empty ())
3870 list->append (", ");
3871
3872 list->append (elem);
3873 }
3874
3875 /* Helper for target_options_to_string. If OPT is present in
3876 TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET.
3877 OPT is removed from TARGET_OPTIONS. */
3878
3879 static void
3880 do_option (target_wait_flags *target_options, std::string *ret,
3881 target_wait_flag opt, const char *opt_str)
3882 {
3883 if ((*target_options & opt) != 0)
3884 {
3885 str_comma_list_concat_elem (ret, opt_str);
3886 *target_options &= ~opt;
3887 }
3888 }
3889
3890 /* See target.h. */
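/* For example, target_options_to_string (TARGET_WNOHANG) returns
   "TARGET_WNOHANG"; any bits that are not recognized make the result
   also include "unknown???". */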
3891
3892 std::string
3893 target_options_to_string (target_wait_flags target_options)
3894 {
3895 std::string ret;
3896
3897 #define DO_TARG_OPTION(OPT) \
3898 do_option (&target_options, &ret, OPT, #OPT)
3899
3900 DO_TARG_OPTION (TARGET_WNOHANG);
3901
3902 if (target_options != 0)
3903 str_comma_list_concat_elem (&ret, "unknown???");
3904
3905 return ret;
3906 }
3907
3908 void
3909 target_fetch_registers (struct regcache *regcache, int regno)
3910 {
3911 current_inferior ()->top_target ()->fetch_registers (regcache, regno);
3912 if (targetdebug)
3913 regcache->debug_print_register ("target_fetch_registers", regno);
3914 }
3915
3916 void
3917 target_store_registers (struct regcache *regcache, int regno)
3918 {
3919 if (!may_write_registers)
3920 error (_("Writing to registers is not allowed (regno %d)"), regno);
3921
3922 current_inferior ()->top_target ()->store_registers (regcache, regno);
3923 if (targetdebug)
3924 {
3925 regcache->debug_print_register ("target_store_registers", regno);
3926 }
3927 }
3928
3929 int
3930 target_core_of_thread (ptid_t ptid)
3931 {
3932 return current_inferior ()->top_target ()->core_of_thread (ptid);
3933 }
3934
3935 int
3936 simple_verify_memory (struct target_ops *ops,
3937 const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
3938 {
3939 LONGEST total_xfered = 0;
3940
3941 while (total_xfered < size)
3942 {
3943 ULONGEST xfered_len;
3944 enum target_xfer_status status;
3945 gdb_byte buf[1024];
3946 ULONGEST howmuch = std::min<ULONGEST> (sizeof (buf), size - total_xfered);
3947
3948 status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
3949 buf, NULL, lma + total_xfered, howmuch,
3950 &xfered_len);
3951 if (status == TARGET_XFER_OK
3952 && memcmp (data + total_xfered, buf, xfered_len) == 0)
3953 {
3954 total_xfered += xfered_len;
3955 QUIT;
3956 }
3957 else
3958 return 0;
3959 }
3960 return 1;
3961 }
3962
3963 /* Default implementation of memory verification. */
3964
3965 static int
3966 default_verify_memory (struct target_ops *self,
3967 const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3968 {
3969 /* Start over from the top of the target stack. */
3970 return simple_verify_memory (current_inferior ()->top_target (),
3971 data, memaddr, size);
3972 }
3973
3974 int
3975 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3976 {
3977 target_ops *target = current_inferior ()->top_target ();
3978
3979 return target->verify_memory (data, memaddr, size);
3980 }
3981
3982 /* The documentation for this function is in its prototype declaration in
3983 target.h. */
3984
3985 int
3986 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
3987 enum target_hw_bp_type rw)
3988 {
3989 target_ops *target = current_inferior ()->top_target ();
3990
3991 return target->insert_mask_watchpoint (addr, mask, rw);
3992 }
3993
3994 /* The documentation for this function is in its prototype declaration in
3995 target.h. */
3996
3997 int
3998 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
3999 enum target_hw_bp_type rw)
4000 {
4001 target_ops *target = current_inferior ()->top_target ();
4002
4003 return target->remove_mask_watchpoint (addr, mask, rw);
4004 }
4005
4006 /* The documentation for this function is in its prototype declaration
4007 in target.h. */
4008
4009 int
4010 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4011 {
4012 target_ops *target = current_inferior ()->top_target ();
4013
4014 return target->masked_watch_num_registers (addr, mask);
4015 }
4016
4017 /* The documentation for this function is in its prototype declaration
4018 in target.h. */
4019
4020 int
4021 target_ranged_break_num_registers (void)
4022 {
4023 return current_inferior ()->top_target ()->ranged_break_num_registers ();
4024 }
4025
4026 /* See target.h. */
4027
4028 struct btrace_target_info *
4029 target_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
4030 {
4031 return current_inferior ()->top_target ()->enable_btrace (ptid, conf);
4032 }
4033
4034 /* See target.h. */
4035
4036 void
4037 target_disable_btrace (struct btrace_target_info *btinfo)
4038 {
4039 current_inferior ()->top_target ()->disable_btrace (btinfo);
4040 }
4041
4042 /* See target.h. */
4043
4044 void
4045 target_teardown_btrace (struct btrace_target_info *btinfo)
4046 {
4047 current_inferior ()->top_target ()->teardown_btrace (btinfo);
4048 }
4049
4050 /* See target.h. */
4051
4052 enum btrace_error
4053 target_read_btrace (struct btrace_data *btrace,
4054 struct btrace_target_info *btinfo,
4055 enum btrace_read_type type)
4056 {
4057 target_ops *target = current_inferior ()->top_target ();
4058
4059 return target->read_btrace (btrace, btinfo, type);
4060 }
4061
4062 /* See target.h. */
4063
4064 const struct btrace_config *
4065 target_btrace_conf (const struct btrace_target_info *btinfo)
4066 {
4067 return current_inferior ()->top_target ()->btrace_conf (btinfo);
4068 }
4069
4070 /* See target.h. */
4071
4072 void
4073 target_stop_recording (void)
4074 {
4075 current_inferior ()->top_target ()->stop_recording ();
4076 }
4077
4078 /* See target.h. */
4079
4080 void
4081 target_save_record (const char *filename)
4082 {
4083 current_inferior ()->top_target ()->save_record (filename);
4084 }
4085
4086 /* See target.h. */
4087
4088 int
4089 target_supports_delete_record ()
4090 {
4091 return current_inferior ()->top_target ()->supports_delete_record ();
4092 }
4093
4094 /* See target.h. */
4095
4096 void
4097 target_delete_record (void)
4098 {
4099 current_inferior ()->top_target ()->delete_record ();
4100 }
4101
4102 /* See target.h. */
4103
4104 enum record_method
4105 target_record_method (ptid_t ptid)
4106 {
4107 return current_inferior ()->top_target ()->record_method (ptid);
4108 }
4109
4110 /* See target.h. */
4111
4112 int
4113 target_record_is_replaying (ptid_t ptid)
4114 {
4115 return current_inferior ()->top_target ()->record_is_replaying (ptid);
4116 }
4117
4118 /* See target.h. */
4119
4120 int
4121 target_record_will_replay (ptid_t ptid, int dir)
4122 {
4123 return current_inferior ()->top_target ()->record_will_replay (ptid, dir);
4124 }
4125
4126 /* See target.h. */
4127
4128 void
4129 target_record_stop_replaying (void)
4130 {
4131 current_inferior ()->top_target ()->record_stop_replaying ();
4132 }
4133
4134 /* See target.h. */
4135
4136 void
4137 target_goto_record_begin (void)
4138 {
4139 current_inferior ()->top_target ()->goto_record_begin ();
4140 }
4141
4142 /* See target.h. */
4143
4144 void
4145 target_goto_record_end (void)
4146 {
4147 current_inferior ()->top_target ()->goto_record_end ();
4148 }
4149
4150 /* See target.h. */
4151
4152 void
4153 target_goto_record (ULONGEST insn)
4154 {
4155 current_inferior ()->top_target ()->goto_record (insn);
4156 }
4157
4158 /* See target.h. */
4159
4160 void
4161 target_insn_history (int size, gdb_disassembly_flags flags)
4162 {
4163 current_inferior ()->top_target ()->insn_history (size, flags);
4164 }
4165
4166 /* See target.h. */
4167
4168 void
4169 target_insn_history_from (ULONGEST from, int size,
4170 gdb_disassembly_flags flags)
4171 {
4172 current_inferior ()->top_target ()->insn_history_from (from, size, flags);
4173 }
4174
4175 /* See target.h. */
4176
4177 void
4178 target_insn_history_range (ULONGEST begin, ULONGEST end,
4179 gdb_disassembly_flags flags)
4180 {
4181 current_inferior ()->top_target ()->insn_history_range (begin, end, flags);
4182 }
4183
4184 /* See target.h. */
4185
4186 void
4187 target_call_history (int size, record_print_flags flags)
4188 {
4189 current_inferior ()->top_target ()->call_history (size, flags);
4190 }
4191
4192 /* See target.h. */
4193
4194 void
4195 target_call_history_from (ULONGEST begin, int size, record_print_flags flags)
4196 {
4197 current_inferior ()->top_target ()->call_history_from (begin, size, flags);
4198 }
4199
4200 /* See target.h. */
4201
4202 void
4203 target_call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
4204 {
4205 current_inferior ()->top_target ()->call_history_range (begin, end, flags);
4206 }
4207
4208 /* See target.h. */
4209
4210 const struct frame_unwind *
4211 target_get_unwinder (void)
4212 {
4213 return current_inferior ()->top_target ()->get_unwinder ();
4214 }
4215
4216 /* See target.h. */
4217
4218 const struct frame_unwind *
4219 target_get_tailcall_unwinder (void)
4220 {
4221 return current_inferior ()->top_target ()->get_tailcall_unwinder ();
4222 }
4223
4224 /* See target.h. */
4225
4226 void
4227 target_prepare_to_generate_core (void)
4228 {
4229 current_inferior ()->top_target ()->prepare_to_generate_core ();
4230 }
4231
4232 /* See target.h. */
4233
4234 void
4235 target_done_generating_core (void)
4236 {
4237 current_inferior ()->top_target ()->done_generating_core ();
4238 }
4239
4240 \f
4241
4242 static char targ_desc[] =
4243 "Names of targets and files being debugged.\nShows the entire \
4244 stack of targets currently in use (including the exec-file,\n\
4245 core-file, and process, if any), as well as the symbol file name.";
4246
4247 static void
4248 default_rcmd (struct target_ops *self, const char *command,
4249 struct ui_file *output)
4250 {
4251 error (_("\"monitor\" command not supported by this target."));
4252 }
4253
4254 static void
4255 do_monitor_command (const char *cmd, int from_tty)
4256 {
4257 target_rcmd (cmd, gdb_stdtarg);
4258 }
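/* Illustrative note (not part of the original file): do_monitor_command
   is the CLI worker behind the "monitor" command registered in
   _initialize_target below.  A typical session against a remote stub
   might look like:

       (gdb) target remote :1234
       (gdb) monitor help

   If no target on the stack implements rcmd, default_rcmd above is
   reached and reports that "monitor" is unsupported.  */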
4259
4260 /* Erases all the memory regions marked as flash. CMD and FROM_TTY are
4261 ignored. */
4262
4263 void
4264 flash_erase_command (const char *cmd, int from_tty)
4265 {
4266   /* Set when at least one flash region is erased, so we know to finalize the flash operations below.  */
4267 bool found_flash_region = false;
4268 struct gdbarch *gdbarch = target_gdbarch ();
4269
4270 std::vector<mem_region> mem_regions = target_memory_map ();
4271
4272 /* Iterate over all memory regions. */
4273 for (const mem_region &m : mem_regions)
4274 {
4275 /* Is this a flash memory region? */
4276 if (m.attrib.mode == MEM_FLASH)
4277 {
4278 found_flash_region = true;
4279 target_flash_erase (m.lo, m.hi - m.lo);
4280
4281 ui_out_emit_tuple tuple_emitter (current_uiout, "erased-regions");
4282
4283 current_uiout->message (_("Erasing flash memory region at address "));
4284 current_uiout->field_core_addr ("address", gdbarch, m.lo);
4285 current_uiout->message (", size = ");
4286 current_uiout->field_string ("size", hex_string (m.hi - m.lo));
4287 current_uiout->message ("\n");
4288 }
4289 }
4290
4291 /* Did we do any flash operations? If so, we need to finalize them. */
4292 if (found_flash_region)
4293 target_flash_done ();
4294 else
4295 current_uiout->message (_("No flash memory regions found.\n"));
4296 }
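/* Illustrative session for the "flash-erase" command registered in
   _initialize_target below (the address and size are hypothetical;
   not part of the original file):

       (gdb) flash-erase
       Erasing flash memory region at address 0x8000000, size = 0x100000

   If the target's memory map contains no MEM_FLASH regions, the
   command prints "No flash memory regions found." instead.  */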
4297
4298 /* Print the name of each layer of our target stack.  */
4299
4300 static void
4301 maintenance_print_target_stack (const char *cmd, int from_tty)
4302 {
4303 printf_filtered (_("The current target stack is:\n"));
4304
4305 for (target_ops *t = current_inferior ()->top_target ();
4306 t != NULL;
4307 t = t->beneath ())
4308 {
4309 if (t->stratum () == debug_stratum)
4310 continue;
4311 printf_filtered (" - %s (%s)\n", t->shortname (), t->longname ());
4312 }
4313 }
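/* Example output (shortnames and longnames are illustrative; not part
   of the original file):

       (gdb) maintenance print target-stack
       The current target stack is:
        - native (Native process)
        - exec (Local exec file)
        - None (None)

   Entries at debug_stratum (pushed by "set debug target") are skipped
   by the loop above and never appear in this listing.  */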
4314
4315 /* See target.h. */
4316
4317 void
4318 target_async (int enable)
4319 {
4320 infrun_async (enable);
4321 current_inferior ()->top_target ()->async (enable);
4322 }
4323
4324 /* See target.h. */
4325
4326 void
4327 target_thread_events (int enable)
4328 {
4329 current_inferior ()->top_target ()->thread_events (enable);
4330 }
4331
4332 /* Controls whether targets may report that they can run, or are running,
4333 asynchronously.  This is just for maintainers to use when debugging gdb.  */
4334 bool target_async_permitted = true;
4335
4336 /* The set command writes to this variable. If the inferior is
4337 executing, target_async_permitted is *not* updated. */
4338 static bool target_async_permitted_1 = true;
4339
4340 static void
4341 maint_set_target_async_command (const char *args, int from_tty,
4342 struct cmd_list_element *c)
4343 {
4344 if (have_live_inferiors ())
4345 {
4346 target_async_permitted_1 = target_async_permitted;
4347 error (_("Cannot change this setting while the inferior is running."));
4348 }
4349
4350 target_async_permitted = target_async_permitted_1;
4351 }
4352
4353 static void
4354 maint_show_target_async_command (struct ui_file *file, int from_tty,
4355 struct cmd_list_element *c,
4356 const char *value)
4357 {
4358 fprintf_filtered (file,
4359 _("Controlling the inferior in "
4360 "asynchronous mode is %s.\n"), value);
4361 }
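/* Illustrative note (not part of the original file): because the set
   command writes to target_async_permitted_1 before the setter runs,
   maint_set_target_async_command restores that shadow variable when it
   rejects the change, e.g.:

       (gdb) maint set target-async off
       Cannot change this setting while the inferior is running.
       (gdb) maint show target-async
       Controlling the inferior in asynchronous mode is on.

   so "show" keeps reporting the value that is actually in effect.  */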
4362
4363 /* Return true if the target operates in non-stop mode even with "set
4364 non-stop off". */
4365
4366 static int
4367 target_always_non_stop_p (void)
4368 {
4369 return current_inferior ()->top_target ()->always_non_stop_p ();
4370 }
4371
4372 /* See target.h. */
4373
4374 bool
4375 target_is_non_stop_p ()
4376 {
4377 return ((non_stop
4378 || target_non_stop_enabled == AUTO_BOOLEAN_TRUE
4379 || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO
4380 && target_always_non_stop_p ()))
4381 && target_can_async_p ());
4382 }
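/* Summary comment (not part of the original file): GDB runs the target
   in non-stop mode when async execution is possible and either the user
   asked for it ("set non-stop on"), "maint set target-non-stop" is on,
   or that setting is auto and the target always operates non-stop.  */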
4383
4384 /* See target.h. */
4385
4386 bool
4387 exists_non_stop_target ()
4388 {
4389 if (target_is_non_stop_p ())
4390 return true;
4391
4392 scoped_restore_current_thread restore_thread;
4393
4394 for (inferior *inf : all_inferiors ())
4395 {
4396 switch_to_inferior_no_thread (inf);
4397 if (target_is_non_stop_p ())
4398 return true;
4399 }
4400
4401 return false;
4402 }
4403
4404 /* Controls whether targets may report that they always run in non-stop
4405 mode.  This is just for maintainers to use when debugging gdb.  */
4406 enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO;
4407
4408 /* The set command writes to this variable. If the inferior is
4409 executing, target_non_stop_enabled is *not* updated. */
4410 static enum auto_boolean target_non_stop_enabled_1 = AUTO_BOOLEAN_AUTO;
4411
4412 /* Implementation of "maint set target-non-stop". */
4413
4414 static void
4415 maint_set_target_non_stop_command (const char *args, int from_tty,
4416 struct cmd_list_element *c)
4417 {
4418 if (have_live_inferiors ())
4419 {
4420 target_non_stop_enabled_1 = target_non_stop_enabled;
4421 error (_("Cannot change this setting while the inferior is running."));
4422 }
4423
4424 target_non_stop_enabled = target_non_stop_enabled_1;
4425 }
4426
4427 /* Implementation of "maint show target-non-stop". */
4428
4429 static void
4430 maint_show_target_non_stop_command (struct ui_file *file, int from_tty,
4431 struct cmd_list_element *c,
4432 const char *value)
4433 {
4434 if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO)
4435 fprintf_filtered (file,
4436 _("Whether the target is always in non-stop mode "
4437 "is %s (currently %s).\n"), value,
4438 target_always_non_stop_p () ? "on" : "off");
4439 else
4440 fprintf_filtered (file,
4441 _("Whether the target is always in non-stop mode "
4442 "is %s.\n"), value);
4443 }
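/* Example (illustrative; not part of the original file):

       (gdb) maint show target-non-stop
       Whether the target is always in non-stop mode is auto (currently off).

   corresponds to the AUTO_BOOLEAN_AUTO branch above; with an explicit
   on/off setting the "(currently ...)" suffix is omitted.  */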
4444
4445 /* Temporary copies of permission settings. */
4446
4447 static bool may_write_registers_1 = true;
4448 static bool may_write_memory_1 = true;
4449 static bool may_insert_breakpoints_1 = true;
4450 static bool may_insert_tracepoints_1 = true;
4451 static bool may_insert_fast_tracepoints_1 = true;
4452 static bool may_stop_1 = true;
4453
4454 /* Make the user-set values match the real values again. */
4455
4456 void
4457 update_target_permissions (void)
4458 {
4459 may_write_registers_1 = may_write_registers;
4460 may_write_memory_1 = may_write_memory;
4461 may_insert_breakpoints_1 = may_insert_breakpoints;
4462 may_insert_tracepoints_1 = may_insert_tracepoints;
4463 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4464 may_stop_1 = may_stop;
4465 }
4466
4467 /* This single function handles (most of) the permission flags in the
4468 same way.  */
4469
4470 static void
4471 set_target_permissions (const char *args, int from_tty,
4472 struct cmd_list_element *c)
4473 {
4474 if (target_has_execution ())
4475 {
4476 update_target_permissions ();
4477 error (_("Cannot change this setting while the inferior is running."));
4478 }
4479
4480 /* Make the real values match the user-changed values. */
4481 may_write_registers = may_write_registers_1;
4482 may_insert_breakpoints = may_insert_breakpoints_1;
4483 may_insert_tracepoints = may_insert_tracepoints_1;
4484 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4485 may_stop = may_stop_1;
4486 update_observer_mode ();
4487 }
4488
4489 /* Set memory write permission independently of observer mode. */
4490
4491 static void
4492 set_write_memory_permission (const char *args, int from_tty,
4493 struct cmd_list_element *c)
4494 {
4495 /* Make the real values match the user-changed values. */
4496 may_write_memory = may_write_memory_1;
4497 update_observer_mode ();
4498 }
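/* Illustrative note (not part of the original file): the may-* settings
   registered in _initialize_target below all funnel through the two
   setters above.  For example:

       (gdb) set may-insert-breakpoints off

   flips may_insert_breakpoints_1, and set_target_permissions then
   copies it into may_insert_breakpoints; if the inferior is running,
   the change is rejected and the shadow value is restored by
   update_target_permissions instead.  */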
4499
4500 void _initialize_target ();
4501
4502 void
4503 _initialize_target ()
4504 {
4505 the_debug_target = new debug_target ();
4506
4507 add_info ("target", info_target_command, targ_desc);
4508 add_info ("files", info_target_command, targ_desc);
4509
4510 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
4511 Set target debugging."), _("\
4512 Show target debugging."), _("\
4513 When non-zero, target debugging is enabled. Higher numbers are more\n\
4514 verbose."),
4515 set_targetdebug,
4516 show_targetdebug,
4517 &setdebuglist, &showdebuglist);
4518
4519 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
4520 &trust_readonly, _("\
4521 Set mode for reading from readonly sections."), _("\
4522 Show mode for reading from readonly sections."), _("\
4523 When this mode is on, memory reads from readonly sections (such as .text)\n\
4524 will be read from the object file instead of from the target. This will\n\
4525 result in a significant performance improvement for remote targets."),
4526 NULL,
4527 show_trust_readonly,
4528 &setlist, &showlist);
4529
4530 add_com ("monitor", class_obscure, do_monitor_command,
4531 _("Send a command to the remote monitor (remote targets only)."));
4532
4533 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
4534 _("Print the name of each layer of the internal target stack."),
4535 &maintenanceprintlist);
4536
4537 add_setshow_boolean_cmd ("target-async", no_class,
4538 &target_async_permitted_1, _("\
4539 Set whether gdb controls the inferior in asynchronous mode."), _("\
4540 Show whether gdb controls the inferior in asynchronous mode."), _("\
4541 Tells gdb whether to control the inferior in asynchronous mode."),
4542 maint_set_target_async_command,
4543 maint_show_target_async_command,
4544 &maintenance_set_cmdlist,
4545 &maintenance_show_cmdlist);
4546
4547 add_setshow_auto_boolean_cmd ("target-non-stop", no_class,
4548 &target_non_stop_enabled_1, _("\
4549 Set whether gdb always controls the inferior in non-stop mode."), _("\
4550 Show whether gdb always controls the inferior in non-stop mode."), _("\
4551 Tells gdb whether to control the inferior in non-stop mode."),
4552 maint_set_target_non_stop_command,
4553 maint_show_target_non_stop_command,
4554 &maintenance_set_cmdlist,
4555 &maintenance_show_cmdlist);
4556
4557 add_setshow_boolean_cmd ("may-write-registers", class_support,
4558 &may_write_registers_1, _("\
4559 Set permission to write into registers."), _("\
4560 Show permission to write into registers."), _("\
4561 When this permission is on, GDB may write into the target's registers.\n\
4562 Otherwise, any sort of write attempt will result in an error."),
4563 set_target_permissions, NULL,
4564 &setlist, &showlist);
4565
4566 add_setshow_boolean_cmd ("may-write-memory", class_support,
4567 &may_write_memory_1, _("\
4568 Set permission to write into target memory."), _("\
4569 Show permission to write into target memory."), _("\
4570 When this permission is on, GDB may write into the target's memory.\n\
4571 Otherwise, any sort of write attempt will result in an error."),
4572 set_write_memory_permission, NULL,
4573 &setlist, &showlist);
4574
4575 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
4576 &may_insert_breakpoints_1, _("\
4577 Set permission to insert breakpoints in the target."), _("\
4578 Show permission to insert breakpoints in the target."), _("\
4579 When this permission is on, GDB may insert breakpoints in the program.\n\
4580 Otherwise, any sort of insertion attempt will result in an error."),
4581 set_target_permissions, NULL,
4582 &setlist, &showlist);
4583
4584 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
4585 &may_insert_tracepoints_1, _("\
4586 Set permission to insert tracepoints in the target."), _("\
4587 Show permission to insert tracepoints in the target."), _("\
4588 When this permission is on, GDB may insert tracepoints in the program.\n\
4589 Otherwise, any sort of insertion attempt will result in an error."),
4590 set_target_permissions, NULL,
4591 &setlist, &showlist);
4592
4593 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
4594 &may_insert_fast_tracepoints_1, _("\
4595 Set permission to insert fast tracepoints in the target."), _("\
4596 Show permission to insert fast tracepoints in the target."), _("\
4597 When this permission is on, GDB may insert fast tracepoints.\n\
4598 Otherwise, any sort of insertion attempt will result in an error."),
4599 set_target_permissions, NULL,
4600 &setlist, &showlist);
4601
4602 add_setshow_boolean_cmd ("may-interrupt", class_support,
4603 &may_stop_1, _("\
4604 Set permission to interrupt or signal the target."), _("\
4605 Show permission to interrupt or signal the target."), _("\
4606 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4607 Otherwise, any attempt to interrupt or stop will be ignored."),
4608 set_target_permissions, NULL,
4609 &setlist, &showlist);
4610
4611 add_com ("flash-erase", no_class, flash_erase_command,
4612 _("Erase all flash memory regions."));
4613
4614 add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
4615 &auto_connect_native_target, _("\
4616 Set whether GDB may automatically connect to the native target."), _("\
4617 Show whether GDB may automatically connect to the native target."), _("\
4618 When on, and GDB is not connected to a target yet, GDB\n\
4619 attempts \"run\" and other commands with the native target."),
4620 NULL, show_auto_connect_native_target,
4621 &setlist, &showlist);
4622 }