1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2022 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "target.h"
24 #include "target-dcache.h"
25 #include "gdbcmd.h"
26 #include "symtab.h"
27 #include "inferior.h"
28 #include "infrun.h"
29 #include "observable.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdbcore.h"
37 #include "target-descriptions.h"
38 #include "gdbthread.h"
39 #include "solib.h"
40 #include "exec.h"
41 #include "inline-frame.h"
42 #include "tracepoint.h"
43 #include "gdb/fileio.h"
44 #include "gdbsupport/agent.h"
45 #include "auxv.h"
46 #include "target-debug.h"
47 #include "top.h"
48 #include "event-top.h"
49 #include <algorithm>
50 #include "gdbsupport/byte-vector.h"
51 #include "gdbsupport/search.h"
52 #include "terminal.h"
53 #include <unordered_map>
54 #include "target-connection.h"
55 #include "valprint.h"
56 #include "cli/cli-decode.h"
57
58 static void generic_tls_error (void) ATTRIBUTE_NORETURN;
59
60 static void default_terminal_info (struct target_ops *, const char *, int);
61
62 static int default_watchpoint_addr_within_range (struct target_ops *,
63 CORE_ADDR, CORE_ADDR, int);
64
65 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
66 CORE_ADDR, int);
67
68 static void default_rcmd (struct target_ops *, const char *, struct ui_file *);
69
70 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
71 long lwp, ULONGEST tid);
72
73 static void default_mourn_inferior (struct target_ops *self);
74
75 static int default_search_memory (struct target_ops *ops,
76 CORE_ADDR start_addr,
77 ULONGEST search_space_len,
78 const gdb_byte *pattern,
79 ULONGEST pattern_len,
80 CORE_ADDR *found_addrp);
81
82 static int default_verify_memory (struct target_ops *self,
83 const gdb_byte *data,
84 CORE_ADDR memaddr, ULONGEST size);
85
86 static void tcomplain (void) ATTRIBUTE_NORETURN;
87
88 static struct target_ops *find_default_run_target (const char *);
89
90 static int dummy_find_memory_regions (struct target_ops *self,
91 find_memory_region_ftype ignore1,
92 void *ignore2);
93
94 static gdb::unique_xmalloc_ptr<char> dummy_make_corefile_notes
95 (struct target_ops *self, bfd *ignore1, int *ignore2);
96
97 static std::string default_pid_to_str (struct target_ops *ops, ptid_t ptid);
98
99 static enum exec_direction_kind default_execution_direction
100 (struct target_ops *self);
101
102 /* Mapping between target_info objects (which have address identity)
103 and corresponding open/factory function/callback. Each add_target
104 call adds one entry to this map, and registers a "target
105 TARGET_NAME" command that when invoked calls the factory registered
106 here. The target_info object is associated with the command via
107 the command's context. */
108 static std::unordered_map<const target_info *, target_open_ftype *>
109 target_factories;
110
111 /* The singleton debug target. */
112
113 static struct target_ops *the_debug_target;
114
115 /* Command list for target. */
116
117 static struct cmd_list_element *targetlist = NULL;
118
119 /* True if we should trust readonly sections from the
120 executable when reading memory. */
121
122 static bool trust_readonly = false;
123
124 /* Nonzero if we should show true memory content including
125    memory breakpoints inserted by GDB.  */
126
127 static int show_memory_breakpoints = 0;
128
129 /* These globals control whether GDB attempts to perform these
130 operations; they are useful for targets that need to prevent
131 inadvertent disruption, such as in non-stop mode. */
132
133 bool may_write_registers = true;
134
135 bool may_write_memory = true;
136
137 bool may_insert_breakpoints = true;
138
139 bool may_insert_tracepoints = true;
140
141 bool may_insert_fast_tracepoints = true;
142
143 bool may_stop = true;
144
145 /* Non-zero if we want to see trace output of target-level operations.  */
146
147 static unsigned int targetdebug = 0;
148
149 static void
150 set_targetdebug (const char *args, int from_tty, struct cmd_list_element *c)
151 {
152 if (targetdebug)
153 current_inferior ()->push_target (the_debug_target);
154 else
155 current_inferior ()->unpush_target (the_debug_target);
156 }
157
158 static void
159 show_targetdebug (struct ui_file *file, int from_tty,
160 struct cmd_list_element *c, const char *value)
161 {
162 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
163 }
164
165 int
166 target_has_memory ()
167 {
168 for (target_ops *t = current_inferior ()->top_target ();
169 t != NULL;
170 t = t->beneath ())
171 if (t->has_memory ())
172 return 1;
173
174 return 0;
175 }
176
177 int
178 target_has_stack ()
179 {
180 for (target_ops *t = current_inferior ()->top_target ();
181 t != NULL;
182 t = t->beneath ())
183 if (t->has_stack ())
184 return 1;
185
186 return 0;
187 }
188
189 int
190 target_has_registers ()
191 {
192 for (target_ops *t = current_inferior ()->top_target ();
193 t != NULL;
194 t = t->beneath ())
195 if (t->has_registers ())
196 return 1;
197
198 return 0;
199 }
200
201 bool
202 target_has_execution (inferior *inf)
203 {
204 if (inf == nullptr)
205 inf = current_inferior ();
206
207 for (target_ops *t = inf->top_target ();
208 t != nullptr;
209 t = inf->find_target_beneath (t))
210 if (t->has_execution (inf))
211 return true;
212
213 return false;
214 }
215
216 const char *
217 target_shortname ()
218 {
219 return current_inferior ()->top_target ()->shortname ();
220 }
221
222 /* See target.h. */
223
224 bool
225 target_attach_no_wait ()
226 {
227 return current_inferior ()->top_target ()->attach_no_wait ();
228 }
229
230 /* See target.h. */
231
232 void
233 target_post_attach (int pid)
234 {
235 return current_inferior ()->top_target ()->post_attach (pid);
236 }
237
238 /* See target.h. */
239
240 void
241 target_prepare_to_store (regcache *regcache)
242 {
243 return current_inferior ()->top_target ()->prepare_to_store (regcache);
244 }
245
246 /* See target.h. */
247
248 bool
249 target_supports_enable_disable_tracepoint ()
250 {
251 target_ops *target = current_inferior ()->top_target ();
252
253 return target->supports_enable_disable_tracepoint ();
254 }
255
256 bool
257 target_supports_string_tracing ()
258 {
259 return current_inferior ()->top_target ()->supports_string_tracing ();
260 }
261
262 /* See target.h. */
263
264 bool
265 target_supports_evaluation_of_breakpoint_conditions ()
266 {
267 target_ops *target = current_inferior ()->top_target ();
268
269 return target->supports_evaluation_of_breakpoint_conditions ();
270 }
271
272 /* See target.h. */
273
274 bool
275 target_supports_dumpcore ()
276 {
277 return current_inferior ()->top_target ()->supports_dumpcore ();
278 }
279
280 /* See target.h. */
281
282 void
283 target_dumpcore (const char *filename)
284 {
285 return current_inferior ()->top_target ()->dumpcore (filename);
286 }
287
288 /* See target.h. */
289
290 bool
291 target_can_run_breakpoint_commands ()
292 {
293 return current_inferior ()->top_target ()->can_run_breakpoint_commands ();
294 }
295
296 /* See target.h. */
297
298 void
299 target_files_info ()
300 {
301 return current_inferior ()->top_target ()->files_info ();
302 }
303
304 /* See target.h. */
305
306 int
307 target_insert_fork_catchpoint (int pid)
308 {
309 return current_inferior ()->top_target ()->insert_fork_catchpoint (pid);
310 }
311
312 /* See target.h. */
313
314 int
315 target_remove_fork_catchpoint (int pid)
316 {
317 return current_inferior ()->top_target ()->remove_fork_catchpoint (pid);
318 }
319
320 /* See target.h. */
321
322 int
323 target_insert_vfork_catchpoint (int pid)
324 {
325 return current_inferior ()->top_target ()->insert_vfork_catchpoint (pid);
326 }
327
328 /* See target.h. */
329
330 int
331 target_remove_vfork_catchpoint (int pid)
332 {
333 return current_inferior ()->top_target ()->remove_vfork_catchpoint (pid);
334 }
335
336 /* See target.h. */
337
338 int
339 target_insert_exec_catchpoint (int pid)
340 {
341 return current_inferior ()->top_target ()->insert_exec_catchpoint (pid);
342 }
343
344 /* See target.h. */
345
346 int
347 target_remove_exec_catchpoint (int pid)
348 {
349 return current_inferior ()->top_target ()->remove_exec_catchpoint (pid);
350 }
351
352 /* See target.h. */
353
354 int
355 target_set_syscall_catchpoint (int pid, bool needed, int any_count,
356 gdb::array_view<const int> syscall_counts)
357 {
358 target_ops *target = current_inferior ()->top_target ();
359
360 return target->set_syscall_catchpoint (pid, needed, any_count,
361 syscall_counts);
362 }
363
364 /* See target.h. */
365
366 void
367 target_rcmd (const char *command, struct ui_file *outbuf)
368 {
369 return current_inferior ()->top_target ()->rcmd (command, outbuf);
370 }
371
372 /* See target.h. */
373
374 bool
375 target_can_lock_scheduler ()
376 {
377 target_ops *target = current_inferior ()->top_target ();
378
379   return (target->get_thread_control_capabilities () & tc_schedlock) != 0;
380 }
381
382 /* See target.h. */
383
384 bool
385 target_can_async_p ()
386 {
387 return target_can_async_p (current_inferior ()->top_target ());
388 }
389
390 /* See target.h. */
391
392 bool
393 target_can_async_p (struct target_ops *target)
394 {
395 if (!target_async_permitted)
396 return false;
397 return target->can_async_p ();
398 }
399
400 /* See target.h. */
401
402 bool
403 target_is_async_p ()
404 {
405 bool result = current_inferior ()->top_target ()->is_async_p ();
406 gdb_assert (target_async_permitted || !result);
407 return result;
408 }
409
410 exec_direction_kind
411 target_execution_direction ()
412 {
413 return current_inferior ()->top_target ()->execution_direction ();
414 }
415
416 /* See target.h. */
417
418 const char *
419 target_extra_thread_info (thread_info *tp)
420 {
421 return current_inferior ()->top_target ()->extra_thread_info (tp);
422 }
423
424 /* See target.h. */
425
426 char *
427 target_pid_to_exec_file (int pid)
428 {
429 return current_inferior ()->top_target ()->pid_to_exec_file (pid);
430 }
431
432 /* See target.h. */
433
434 gdbarch *
435 target_thread_architecture (ptid_t ptid)
436 {
437 return current_inferior ()->top_target ()->thread_architecture (ptid);
438 }
439
440 /* See target.h. */
441
442 int
443 target_find_memory_regions (find_memory_region_ftype func, void *data)
444 {
445 return current_inferior ()->top_target ()->find_memory_regions (func, data);
446 }
447
448 /* See target.h. */
449
450 gdb::unique_xmalloc_ptr<char>
451 target_make_corefile_notes (bfd *bfd, int *size_p)
452 {
453 return current_inferior ()->top_target ()->make_corefile_notes (bfd, size_p);
454 }
455
456 gdb_byte *
457 target_get_bookmark (const char *args, int from_tty)
458 {
459 return current_inferior ()->top_target ()->get_bookmark (args, from_tty);
460 }
461
462 void
463 target_goto_bookmark (const gdb_byte *arg, int from_tty)
464 {
465 return current_inferior ()->top_target ()->goto_bookmark (arg, from_tty);
466 }
467
468 /* See target.h. */
469
470 bool
471 target_stopped_by_watchpoint ()
472 {
473 return current_inferior ()->top_target ()->stopped_by_watchpoint ();
474 }
475
476 /* See target.h. */
477
478 bool
479 target_stopped_by_sw_breakpoint ()
480 {
481 return current_inferior ()->top_target ()->stopped_by_sw_breakpoint ();
482 }
483
484 bool
485 target_supports_stopped_by_sw_breakpoint ()
486 {
487 target_ops *target = current_inferior ()->top_target ();
488
489 return target->supports_stopped_by_sw_breakpoint ();
490 }
491
492 bool
493 target_stopped_by_hw_breakpoint ()
494 {
495 return current_inferior ()->top_target ()->stopped_by_hw_breakpoint ();
496 }
497
498 bool
499 target_supports_stopped_by_hw_breakpoint ()
500 {
501 target_ops *target = current_inferior ()->top_target ();
502
503 return target->supports_stopped_by_hw_breakpoint ();
504 }
505
506 /* See target.h. */
507
508 bool
509 target_have_steppable_watchpoint ()
510 {
511 return current_inferior ()->top_target ()->have_steppable_watchpoint ();
512 }
513
514 /* See target.h. */
515
516 int
517 target_can_use_hardware_watchpoint (bptype type, int cnt, int othertype)
518 {
519 target_ops *target = current_inferior ()->top_target ();
520
521 return target->can_use_hw_breakpoint (type, cnt, othertype);
522 }
523
524 /* See target.h. */
525
526 int
527 target_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
528 {
529 target_ops *target = current_inferior ()->top_target ();
530
531 return target->region_ok_for_hw_watchpoint (addr, len);
532 }
533
534
535 int
536 target_can_do_single_step ()
537 {
538 return current_inferior ()->top_target ()->can_do_single_step ();
539 }
540
541 /* See target.h. */
542
543 int
544 target_insert_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
545 expression *cond)
546 {
547 target_ops *target = current_inferior ()->top_target ();
548
549 return target->insert_watchpoint (addr, len, type, cond);
550 }
551
552 /* See target.h. */
553
554 int
555 target_remove_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
556 expression *cond)
557 {
558 target_ops *target = current_inferior ()->top_target ();
559
560 return target->remove_watchpoint (addr, len, type, cond);
561 }
562
563 /* See target.h. */
564
565 int
566 target_insert_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
567 {
568 target_ops *target = current_inferior ()->top_target ();
569
570 return target->insert_hw_breakpoint (gdbarch, bp_tgt);
571 }
572
573 /* See target.h. */
574
575 int
576 target_remove_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
577 {
578 target_ops *target = current_inferior ()->top_target ();
579
580 return target->remove_hw_breakpoint (gdbarch, bp_tgt);
581 }
582
583 /* See target.h. */
584
585 bool
586 target_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int type,
587 expression *cond)
588 {
589 target_ops *target = current_inferior ()->top_target ();
590
591 return target->can_accel_watchpoint_condition (addr, len, type, cond);
592 }
593
594 /* See target.h. */
595
596 bool
597 target_can_execute_reverse ()
598 {
599 return current_inferior ()->top_target ()->can_execute_reverse ();
600 }
601
602 ptid_t
603 target_get_ada_task_ptid (long lwp, ULONGEST tid)
604 {
605 return current_inferior ()->top_target ()->get_ada_task_ptid (lwp, tid);
606 }
607
608 bool
609 target_filesystem_is_local ()
610 {
611 return current_inferior ()->top_target ()->filesystem_is_local ();
612 }
613
614 void
615 target_trace_init ()
616 {
617 return current_inferior ()->top_target ()->trace_init ();
618 }
619
620 void
621 target_download_tracepoint (bp_location *location)
622 {
623 return current_inferior ()->top_target ()->download_tracepoint (location);
624 }
625
626 bool
627 target_can_download_tracepoint ()
628 {
629 return current_inferior ()->top_target ()->can_download_tracepoint ();
630 }
631
632 void
633 target_download_trace_state_variable (const trace_state_variable &tsv)
634 {
635 target_ops *target = current_inferior ()->top_target ();
636
637 return target->download_trace_state_variable (tsv);
638 }
639
640 void
641 target_enable_tracepoint (bp_location *loc)
642 {
643 return current_inferior ()->top_target ()->enable_tracepoint (loc);
644 }
645
646 void
647 target_disable_tracepoint (bp_location *loc)
648 {
649 return current_inferior ()->top_target ()->disable_tracepoint (loc);
650 }
651
652 void
653 target_trace_start ()
654 {
655 return current_inferior ()->top_target ()->trace_start ();
656 }
657
658 void
659 target_trace_set_readonly_regions ()
660 {
661 return current_inferior ()->top_target ()->trace_set_readonly_regions ();
662 }
663
664 int
665 target_get_trace_status (trace_status *ts)
666 {
667 return current_inferior ()->top_target ()->get_trace_status (ts);
668 }
669
670 void
671 target_get_tracepoint_status (breakpoint *tp, uploaded_tp *utp)
672 {
673 return current_inferior ()->top_target ()->get_tracepoint_status (tp, utp);
674 }
675
676 void
677 target_trace_stop ()
678 {
679 return current_inferior ()->top_target ()->trace_stop ();
680 }
681
682 int
683 target_trace_find (trace_find_type type, int num,
684 CORE_ADDR addr1, CORE_ADDR addr2, int *tpp)
685 {
686 target_ops *target = current_inferior ()->top_target ();
687
688 return target->trace_find (type, num, addr1, addr2, tpp);
689 }
690
691 bool
692 target_get_trace_state_variable_value (int tsv, LONGEST *val)
693 {
694 target_ops *target = current_inferior ()->top_target ();
695
696 return target->get_trace_state_variable_value (tsv, val);
697 }
698
699 int
700 target_save_trace_data (const char *filename)
701 {
702 return current_inferior ()->top_target ()->save_trace_data (filename);
703 }
704
705 int
706 target_upload_tracepoints (uploaded_tp **utpp)
707 {
708 return current_inferior ()->top_target ()->upload_tracepoints (utpp);
709 }
710
711 int
712 target_upload_trace_state_variables (uploaded_tsv **utsvp)
713 {
714 target_ops *target = current_inferior ()->top_target ();
715
716 return target->upload_trace_state_variables (utsvp);
717 }
718
719 LONGEST
720 target_get_raw_trace_data (gdb_byte *buf, ULONGEST offset, LONGEST len)
721 {
722 target_ops *target = current_inferior ()->top_target ();
723
724 return target->get_raw_trace_data (buf, offset, len);
725 }
726
727 int
728 target_get_min_fast_tracepoint_insn_len ()
729 {
730 target_ops *target = current_inferior ()->top_target ();
731
732 return target->get_min_fast_tracepoint_insn_len ();
733 }
734
735 void
736 target_set_disconnected_tracing (int val)
737 {
738 return current_inferior ()->top_target ()->set_disconnected_tracing (val);
739 }
740
741 void
742 target_set_circular_trace_buffer (int val)
743 {
744 return current_inferior ()->top_target ()->set_circular_trace_buffer (val);
745 }
746
747 void
748 target_set_trace_buffer_size (LONGEST val)
749 {
750 return current_inferior ()->top_target ()->set_trace_buffer_size (val);
751 }
752
753 bool
754 target_set_trace_notes (const char *user, const char *notes,
755 const char *stopnotes)
756 {
757 target_ops *target = current_inferior ()->top_target ();
758
759 return target->set_trace_notes (user, notes, stopnotes);
760 }
761
762 bool
763 target_get_tib_address (ptid_t ptid, CORE_ADDR *addr)
764 {
765 return current_inferior ()->top_target ()->get_tib_address (ptid, addr);
766 }
767
768 void
769 target_set_permissions ()
770 {
771 return current_inferior ()->top_target ()->set_permissions ();
772 }
773
774 bool
775 target_static_tracepoint_marker_at (CORE_ADDR addr,
776 static_tracepoint_marker *marker)
777 {
778 target_ops *target = current_inferior ()->top_target ();
779
780 return target->static_tracepoint_marker_at (addr, marker);
781 }
782
783 std::vector<static_tracepoint_marker>
784 target_static_tracepoint_markers_by_strid (const char *marker_id)
785 {
786 target_ops *target = current_inferior ()->top_target ();
787
788 return target->static_tracepoint_markers_by_strid (marker_id);
789 }
790
791 traceframe_info_up
792 target_traceframe_info ()
793 {
794 return current_inferior ()->top_target ()->traceframe_info ();
795 }
796
797 bool
798 target_use_agent (bool use)
799 {
800 return current_inferior ()->top_target ()->use_agent (use);
801 }
802
803 bool
804 target_can_use_agent ()
805 {
806 return current_inferior ()->top_target ()->can_use_agent ();
807 }
808
809 bool
810 target_augmented_libraries_svr4_read ()
811 {
812 return current_inferior ()->top_target ()->augmented_libraries_svr4_read ();
813 }
814
815 bool
816 target_supports_memory_tagging ()
817 {
818 return current_inferior ()->top_target ()->supports_memory_tagging ();
819 }
820
821 bool
822 target_fetch_memtags (CORE_ADDR address, size_t len, gdb::byte_vector &tags,
823 int type)
824 {
825 return current_inferior ()->top_target ()->fetch_memtags (address, len, tags, type);
826 }
827
828 bool
829 target_store_memtags (CORE_ADDR address, size_t len,
830 const gdb::byte_vector &tags, int type)
831 {
832 return current_inferior ()->top_target ()->store_memtags (address, len, tags, type);
833 }
834
835 void
836 target_log_command (const char *p)
837 {
838 return current_inferior ()->top_target ()->log_command (p);
839 }
840
841 /* This is used to implement the various target commands. */
842
843 static void
844 open_target (const char *args, int from_tty, struct cmd_list_element *command)
845 {
846 auto *ti = static_cast<target_info *> (command->context ());
847 target_open_ftype *func = target_factories[ti];
848
849 if (targetdebug)
850 fprintf_unfiltered (gdb_stdlog, "-> %s->open (...)\n",
851 ti->shortname);
852
853 func (args, from_tty);
854
855 if (targetdebug)
856 fprintf_unfiltered (gdb_stdlog, "<- %s->open (%s, %d)\n",
857 ti->shortname, args, from_tty);
858 }
859
860 /* See target.h. */
861
862 void
863 add_target (const target_info &t, target_open_ftype *func,
864 completer_ftype *completer)
865 {
866 struct cmd_list_element *c;
867
868 auto &func_slot = target_factories[&t];
869 if (func_slot != nullptr)
870 internal_error (__FILE__, __LINE__,
871 _("target already added (\"%s\")."), t.shortname);
872 func_slot = func;
873
874 if (targetlist == NULL)
875 add_basic_prefix_cmd ("target", class_run, _("\
876 Connect to a target machine or process.\n\
877 The first argument is the type or protocol of the target machine.\n\
878 Remaining arguments are interpreted by the target protocol. For more\n\
879 information on the arguments for a particular protocol, type\n\
880 `help target ' followed by the protocol name."),
881 &targetlist, 0, &cmdlist);
882 c = add_cmd (t.shortname, no_class, t.doc, &targetlist);
883 c->set_context ((void *) &t);
884 c->func = open_target;
885 if (completer != NULL)
886 set_cmd_completer (c, completer);
887 }
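
/* Example only (guarded out of the build): a minimal sketch of how a
   target backend typically registers itself with add_target.  The
   names example_target_info and example_target_open, and the doc
   strings, are hypothetical.  */
#if 0
static const target_info example_target_info = {
  "example",				/* shortname: "target example" */
  N_("An example debugging target"),	/* longname */
  N_("Use `target example <args>'.")	/* doc */
};

static void
example_target_open (const char *args, int from_tty)
{
  /* Parse ARGS, establish the connection, then push the backend's
     target_ops instance onto the current inferior's target stack.  */
}

void _initialize_example_target ();
void
_initialize_example_target ()
{
  add_target (example_target_info, example_target_open);
}
#endif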
888
889 /* See target.h. */
890
891 void
892 add_deprecated_target_alias (const target_info &tinfo, const char *alias)
893 {
894 struct cmd_list_element *c;
895
896   /* If we use add_alias_cmd here, we do not get the deprecated warning;
897      see PR cli/15104.  */
898 c = add_cmd (alias, no_class, tinfo.doc, &targetlist);
899 c->func = open_target;
900 c->set_context ((void *) &tinfo);
901 gdb::unique_xmalloc_ptr<char> alt
902 = xstrprintf ("target %s", tinfo.shortname);
903 deprecate_cmd (c, alt.release ());
904 }
905
906 /* Stub functions */
907
908 void
909 target_kill (void)
910 {
911 current_inferior ()->top_target ()->kill ();
912 }
913
914 void
915 target_load (const char *arg, int from_tty)
916 {
917 target_dcache_invalidate ();
918 current_inferior ()->top_target ()->load (arg, from_tty);
919 }
920
921 /* Define it. */
922
923 target_terminal_state target_terminal::m_terminal_state
924 = target_terminal_state::is_ours;
925
926 /* See target/target.h. */
927
928 void
929 target_terminal::init (void)
930 {
931 current_inferior ()->top_target ()->terminal_init ();
932
933 m_terminal_state = target_terminal_state::is_ours;
934 }
935
936 /* See target/target.h. */
937
938 void
939 target_terminal::inferior (void)
940 {
941 struct ui *ui = current_ui;
942
943 /* A background resume (``run&'') should leave GDB in control of the
944 terminal. */
945 if (ui->prompt_state != PROMPT_BLOCKED)
946 return;
947
948 /* Since we always run the inferior in the main console (unless "set
949 inferior-tty" is in effect), when some UI other than the main one
950      calls target_terminal::inferior, we leave the main UI's
951 terminal settings as is. */
952 if (ui != main_ui)
953 return;
954
955 /* If GDB is resuming the inferior in the foreground, install
956 inferior's terminal modes. */
957
958 struct inferior *inf = current_inferior ();
959
960 if (inf->terminal_state != target_terminal_state::is_inferior)
961 {
962 current_inferior ()->top_target ()->terminal_inferior ();
963 inf->terminal_state = target_terminal_state::is_inferior;
964 }
965
966 m_terminal_state = target_terminal_state::is_inferior;
967
968 /* If the user hit C-c before, pretend that it was hit right
969 here. */
970 if (check_quit_flag ())
971 target_pass_ctrlc ();
972 }
973
974 /* See target/target.h. */
975
976 void
977 target_terminal::restore_inferior (void)
978 {
979 struct ui *ui = current_ui;
980
981 /* See target_terminal::inferior(). */
982 if (ui->prompt_state != PROMPT_BLOCKED || ui != main_ui)
983 return;
984
985 /* Restore the terminal settings of inferiors that were in the
986 foreground but are now ours_for_output due to a temporary
987      target_terminal::ours_for_output () call.  */
988
989 {
990 scoped_restore_current_inferior restore_inferior;
991
992 for (::inferior *inf : all_inferiors ())
993 {
994 if (inf->terminal_state == target_terminal_state::is_ours_for_output)
995 {
996 set_current_inferior (inf);
997 current_inferior ()->top_target ()->terminal_inferior ();
998 inf->terminal_state = target_terminal_state::is_inferior;
999 }
1000 }
1001 }
1002
1003 m_terminal_state = target_terminal_state::is_inferior;
1004
1005 /* If the user hit C-c before, pretend that it was hit right
1006 here. */
1007 if (check_quit_flag ())
1008 target_pass_ctrlc ();
1009 }
1010
1011 /* Switch terminal state to DESIRED_STATE, either is_ours, or
1012 is_ours_for_output. */
1013
1014 static void
1015 target_terminal_is_ours_kind (target_terminal_state desired_state)
1016 {
1017 scoped_restore_current_inferior restore_inferior;
1018
1019 /* Must do this in two passes. First, have all inferiors save the
1020      current terminal settings.  Then, after all inferiors have had a
1021 chance to safely save the terminal settings, restore GDB's
1022 terminal settings. */
1023
1024 for (inferior *inf : all_inferiors ())
1025 {
1026 if (inf->terminal_state == target_terminal_state::is_inferior)
1027 {
1028 set_current_inferior (inf);
1029 current_inferior ()->top_target ()->terminal_save_inferior ();
1030 }
1031 }
1032
1033 for (inferior *inf : all_inferiors ())
1034 {
1035 /* Note we don't check is_inferior here like above because we
1036 need to handle 'is_ours_for_output -> is_ours' too. Careful
1037 to never transition from 'is_ours' to 'is_ours_for_output',
1038 though. */
1039 if (inf->terminal_state != target_terminal_state::is_ours
1040 && inf->terminal_state != desired_state)
1041 {
1042 set_current_inferior (inf);
1043 if (desired_state == target_terminal_state::is_ours)
1044 current_inferior ()->top_target ()->terminal_ours ();
1045 else if (desired_state == target_terminal_state::is_ours_for_output)
1046 current_inferior ()->top_target ()->terminal_ours_for_output ();
1047 else
1048 gdb_assert_not_reached ("unhandled desired state");
1049 inf->terminal_state = desired_state;
1050 }
1051 }
1052 }
1053
1054 /* See target/target.h. */
1055
1056 void
1057 target_terminal::ours ()
1058 {
1059 struct ui *ui = current_ui;
1060
1061 /* See target_terminal::inferior. */
1062 if (ui != main_ui)
1063 return;
1064
1065 if (m_terminal_state == target_terminal_state::is_ours)
1066 return;
1067
1068 target_terminal_is_ours_kind (target_terminal_state::is_ours);
1069 m_terminal_state = target_terminal_state::is_ours;
1070 }
1071
1072 /* See target/target.h. */
1073
1074 void
1075 target_terminal::ours_for_output ()
1076 {
1077 struct ui *ui = current_ui;
1078
1079 /* See target_terminal::inferior. */
1080 if (ui != main_ui)
1081 return;
1082
1083 if (!target_terminal::is_inferior ())
1084 return;
1085
1086 target_terminal_is_ours_kind (target_terminal_state::is_ours_for_output);
1087 target_terminal::m_terminal_state = target_terminal_state::is_ours_for_output;
1088 }
1089
1090 /* See target/target.h. */
1091
1092 void
1093 target_terminal::info (const char *arg, int from_tty)
1094 {
1095 current_inferior ()->top_target ()->terminal_info (arg, from_tty);
1096 }
1097
1098 /* See target.h. */
1099
1100 bool
1101 target_supports_terminal_ours (void)
1102 {
1103 /* The current top target is the target at the top of the target
1104 stack of the current inferior. While normally there's always an
1105 inferior, we must check for nullptr here because we can get here
1106 very early during startup, before the initial inferior is first
1107 created. */
1108 inferior *inf = current_inferior ();
1109
1110 if (inf == nullptr)
1111 return false;
1112 return inf->top_target ()->supports_terminal_ours ();
1113 }
1114
1115 static void
1116 tcomplain (void)
1117 {
1118 error (_("You can't do that when your target is `%s'"),
1119 current_inferior ()->top_target ()->shortname ());
1120 }
1121
1122 void
1123 noprocess (void)
1124 {
1125 error (_("You can't do that without a process to debug."));
1126 }
1127
1128 static void
1129 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
1130 {
1131 printf_filtered (_("No saved terminal information.\n"));
1132 }
1133
1134 /* A default implementation for the to_get_ada_task_ptid target method.
1135
1136 This function builds the PTID by using both LWP and TID as part of
1137 the PTID lwp and tid elements. The pid used is the pid of the
1138 inferior_ptid. */
1139
1140 static ptid_t
1141 default_get_ada_task_ptid (struct target_ops *self, long lwp, ULONGEST tid)
1142 {
1143 return ptid_t (inferior_ptid.pid (), lwp, tid);
1144 }
1145
1146 static enum exec_direction_kind
1147 default_execution_direction (struct target_ops *self)
1148 {
1149 if (!target_can_execute_reverse ())
1150 return EXEC_FORWARD;
1151 else if (!target_can_async_p ())
1152 return EXEC_FORWARD;
1153 else
1154 gdb_assert_not_reached ("\
1155 to_execution_direction must be implemented for reverse async");
1156 }
1157
1158 /* See target.h. */
1159
1160 void
1161 decref_target (target_ops *t)
1162 {
1163 t->decref ();
1164 if (t->refcount () == 0)
1165 {
1166 if (t->stratum () == process_stratum)
1167 connection_list_remove (as_process_stratum_target (t));
1168 target_close (t);
1169 }
1170 }
1171
1172 /* See target.h. */
1173
1174 void
1175 target_stack::push (target_ops *t)
1176 {
1177 t->incref ();
1178
1179 strata stratum = t->stratum ();
1180
1181 if (stratum == process_stratum)
1182 connection_list_add (as_process_stratum_target (t));
1183
1184 /* If there's already a target at this stratum, remove it. */
1185
1186 if (m_stack[stratum] != NULL)
1187 unpush (m_stack[stratum]);
1188
1189 /* Now add the new one. */
1190 m_stack[stratum] = t;
1191
1192 if (m_top < stratum)
1193 m_top = stratum;
1194 }
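
/* Example only (guarded out of the build): a minimal sketch of how a
   backend's open routine typically hands its target_ops instance to
   the stack; push () takes a reference and replaces any target already
   installed at the same stratum.  new_example_target is hypothetical.  */
#if 0
static void
example_push_new_target ()
{
  target_ops *example_ops = new_example_target ();	/* hypothetical */

  current_inferior ()->push_target (example_ops);
}
#endif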
1195
1196 /* See target.h. */
1197
1198 bool
1199 target_stack::unpush (target_ops *t)
1200 {
1201 gdb_assert (t != NULL);
1202
1203 strata stratum = t->stratum ();
1204
1205 if (stratum == dummy_stratum)
1206 internal_error (__FILE__, __LINE__,
1207 _("Attempt to unpush the dummy target"));
1208
1209 /* Look for the specified target. Note that a target can only occur
1210 once in the target stack. */
1211
1212 if (m_stack[stratum] != t)
1213 {
1214 /* If T wasn't pushed, quit. Only open targets should be
1215 closed. */
1216 return false;
1217 }
1218
1219 /* Unchain the target. */
1220 m_stack[stratum] = NULL;
1221
1222 if (m_top == stratum)
1223 m_top = this->find_beneath (t)->stratum ();
1224
1225 /* Finally close the target, if there are no inferiors
1226 referencing this target still. Note we do this after unchaining,
1227 so any target method calls from within the target_close
1228 implementation don't end up in T anymore. Do leave the target
1229      open if there are other inferiors still referencing this
1230      target.  */
1231 decref_target (t);
1232
1233 return true;
1234 }
1235
1236 /* Unpush TARGET and assert that it worked. */
1237
1238 static void
1239 unpush_target_and_assert (struct target_ops *target)
1240 {
1241 if (!current_inferior ()->unpush_target (target))
1242 {
1243 fprintf_unfiltered (gdb_stderr,
1244 "pop_all_targets couldn't find target %s\n",
1245 target->shortname ());
1246 internal_error (__FILE__, __LINE__,
1247 _("failed internal consistency check"));
1248 }
1249 }
1250
1251 void
1252 pop_all_targets_above (enum strata above_stratum)
1253 {
1254 while ((int) (current_inferior ()->top_target ()->stratum ())
1255 > (int) above_stratum)
1256 unpush_target_and_assert (current_inferior ()->top_target ());
1257 }
1258
1259 /* See target.h. */
1260
1261 void
1262 pop_all_targets_at_and_above (enum strata stratum)
1263 {
1264 while ((int) (current_inferior ()->top_target ()->stratum ())
1265 >= (int) stratum)
1266 unpush_target_and_assert (current_inferior ()->top_target ());
1267 }
1268
1269 void
1270 pop_all_targets (void)
1271 {
1272 pop_all_targets_above (dummy_stratum);
1273 }
1274
1275 void
1276 target_unpusher::operator() (struct target_ops *ops) const
1277 {
1278 current_inferior ()->unpush_target (ops);
1279 }
1280
1281 /* Default implementation of to_get_thread_local_address. */
1282
1283 static void
1284 generic_tls_error (void)
1285 {
1286 throw_error (TLS_GENERIC_ERROR,
1287 _("Cannot find thread-local variables on this target"));
1288 }
1289
1290 /* Using the objfile specified in OBJFILE, find the address for the
1291 current thread's thread-local storage with offset OFFSET. */
1292 CORE_ADDR
1293 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1294 {
1295 volatile CORE_ADDR addr = 0;
1296 struct target_ops *target = current_inferior ()->top_target ();
1297 struct gdbarch *gdbarch = target_gdbarch ();
1298
1299 if (gdbarch_fetch_tls_load_module_address_p (gdbarch))
1300 {
1301 ptid_t ptid = inferior_ptid;
1302
1303 try
1304 {
1305 CORE_ADDR lm_addr;
1306
1307 /* Fetch the load module address for this objfile. */
1308 lm_addr = gdbarch_fetch_tls_load_module_address (gdbarch,
1309 objfile);
1310
1311 if (gdbarch_get_thread_local_address_p (gdbarch))
1312 addr = gdbarch_get_thread_local_address (gdbarch, ptid, lm_addr,
1313 offset);
1314 else
1315 addr = target->get_thread_local_address (ptid, lm_addr, offset);
1316 }
1317 /* If an error occurred, print TLS related messages here. Otherwise,
1318 throw the error to some higher catcher. */
1319 catch (const gdb_exception &ex)
1320 {
1321 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1322
1323 switch (ex.error)
1324 {
1325 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1326 error (_("Cannot find thread-local variables "
1327 "in this thread library."));
1328 break;
1329 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1330 if (objfile_is_library)
1331 error (_("Cannot find shared library `%s' in dynamic"
1332 " linker's load module list"), objfile_name (objfile));
1333 else
1334 error (_("Cannot find executable file `%s' in dynamic"
1335 " linker's load module list"), objfile_name (objfile));
1336 break;
1337 case TLS_NOT_ALLOCATED_YET_ERROR:
1338 if (objfile_is_library)
1339 error (_("The inferior has not yet allocated storage for"
1340 " thread-local variables in\n"
1341 "the shared library `%s'\n"
1342 "for %s"),
1343 objfile_name (objfile),
1344 target_pid_to_str (ptid).c_str ());
1345 else
1346 error (_("The inferior has not yet allocated storage for"
1347 " thread-local variables in\n"
1348 "the executable `%s'\n"
1349 "for %s"),
1350 objfile_name (objfile),
1351 target_pid_to_str (ptid).c_str ());
1352 break;
1353 case TLS_GENERIC_ERROR:
1354 if (objfile_is_library)
1355 error (_("Cannot find thread-local storage for %s, "
1356 "shared library %s:\n%s"),
1357 target_pid_to_str (ptid).c_str (),
1358 objfile_name (objfile), ex.what ());
1359 else
1360 error (_("Cannot find thread-local storage for %s, "
1361 "executable file %s:\n%s"),
1362 target_pid_to_str (ptid).c_str (),
1363 objfile_name (objfile), ex.what ());
1364 break;
1365 default:
1366 throw;
1367 break;
1368 }
1369 }
1370 }
1371 else
1372 error (_("Cannot find thread-local variables on this target"));
1373
1374 return addr;
1375 }
1376
1377 const char *
1378 target_xfer_status_to_string (enum target_xfer_status status)
1379 {
1380 #define CASE(X) case X: return #X
1381 switch (status)
1382 {
1383 CASE(TARGET_XFER_E_IO);
1384 CASE(TARGET_XFER_UNAVAILABLE);
1385 default:
1386 return "<unknown>";
1387 }
1388 #undef CASE
1389 };
1390
1391
1392 /* See target.h. */
1393
1394 gdb::unique_xmalloc_ptr<char>
1395 target_read_string (CORE_ADDR memaddr, int len, int *bytes_read)
1396 {
1397 gdb::unique_xmalloc_ptr<gdb_byte> buffer;
1398
1399 int ignore;
1400 if (bytes_read == nullptr)
1401 bytes_read = &ignore;
1402
1403 /* Note that the endian-ness does not matter here. */
1404 int errcode = read_string (memaddr, -1, 1, len, BFD_ENDIAN_LITTLE,
1405 &buffer, bytes_read);
1406 if (errcode != 0)
1407 return {};
1408
1409 return gdb::unique_xmalloc_ptr<char> ((char *) buffer.release ());
1410 }
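
/* Example only (guarded out of the build): a typical call to
   target_read_string.  The address argument and the 200-byte limit are
   hypothetical.  */
#if 0
static void
example_print_inferior_string (CORE_ADDR addr)
{
  int bytes_read;
  gdb::unique_xmalloc_ptr<char> str
    = target_read_string (addr, 200, &bytes_read);

  /* A null result means the read failed; otherwise STR holds a
     NUL-terminated copy and BYTES_READ the number of bytes fetched.  */
  if (str != nullptr)
    printf_filtered ("%s (%d bytes)\n", str.get (), bytes_read);
}
#endif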
1411
1412 const target_section_table *
1413 target_get_section_table (struct target_ops *target)
1414 {
1415 return target->get_section_table ();
1416 }
1417
1418 /* Find a section containing ADDR. */
1419
1420 const struct target_section *
1421 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1422 {
1423 const target_section_table *table = target_get_section_table (target);
1424
1425 if (table == NULL)
1426 return NULL;
1427
1428 for (const target_section &secp : *table)
1429 {
1430 if (addr >= secp.addr && addr < secp.endaddr)
1431 return &secp;
1432 }
1433 return NULL;
1434 }
1435
1436 /* See target.h. */
1437
1438 const target_section_table *
1439 default_get_section_table ()
1440 {
1441 return &current_program_space->target_sections ();
1442 }
1443
1444 /* Helper for the memory xfer routines. Checks the attributes of the
1445 memory region of MEMADDR against the read or write being attempted.
1446    If the access is permitted, returns true; otherwise returns false.
1447    REGION_P is an optional output parameter.  If non-NULL, it is
1448 filled with a pointer to the memory region of MEMADDR. REG_LEN
1449 returns LEN trimmed to the end of the region. This is how much the
1450 caller can continue requesting, if the access is permitted. A
1451 single xfer request must not straddle memory region boundaries. */
1452
1453 static int
1454 memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
1455 ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
1456 struct mem_region **region_p)
1457 {
1458 struct mem_region *region;
1459
1460 region = lookup_mem_region (memaddr);
1461
1462 if (region_p != NULL)
1463 *region_p = region;
1464
1465 switch (region->attrib.mode)
1466 {
1467 case MEM_RO:
1468 if (writebuf != NULL)
1469 return 0;
1470 break;
1471
1472 case MEM_WO:
1473 if (readbuf != NULL)
1474 return 0;
1475 break;
1476
1477 case MEM_FLASH:
1478 /* We only support writing to flash during "load" for now. */
1479 if (writebuf != NULL)
1480 error (_("Writing to flash memory forbidden in this context"));
1481 break;
1482
1483 case MEM_NONE:
1484 return 0;
1485 }
1486
1487 /* region->hi == 0 means there's no upper bound. */
1488 if (memaddr + len < region->hi || region->hi == 0)
1489 *reg_len = len;
1490 else
1491 *reg_len = region->hi - memaddr;
1492
1493 return 1;
1494 }
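
/* Example only (guarded out of the build): a sketch of how the helper
   above is consulted before touching target memory, mirroring the way
   memory_xfer_partial_1 and target_xfer_partial use it below.  The
   function name is hypothetical.  */
#if 0
static enum target_xfer_status
example_checked_write (struct target_ops *ops, CORE_ADDR addr,
		       const gdb_byte *data, ULONGEST len,
		       ULONGEST *xfered_len)
{
  ULONGEST reg_len;

  /* Refuse the access if the region's attributes forbid writes; on
     success REG_LEN is LEN clipped to the end of the region.  */
  if (!memory_xfer_check_region (NULL, data, addr, len, &reg_len, NULL))
    return TARGET_XFER_E_IO;

  return raw_memory_xfer_partial (ops, NULL, data, addr, reg_len,
				  xfered_len);
}
#endif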
1495
1496 /* Read memory from more than one valid target. A core file, for
1497    instance, could have some of the memory but delegate other bits to
1498 the target below it. So, we must manually try all targets. */
1499
1500 enum target_xfer_status
1501 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1502 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1503 ULONGEST *xfered_len)
1504 {
1505 enum target_xfer_status res;
1506
1507 do
1508 {
1509 res = ops->xfer_partial (TARGET_OBJECT_MEMORY, NULL,
1510 readbuf, writebuf, memaddr, len,
1511 xfered_len);
1512 if (res == TARGET_XFER_OK)
1513 break;
1514
1515 /* Stop if the target reports that the memory is not available. */
1516 if (res == TARGET_XFER_UNAVAILABLE)
1517 break;
1518
1519 /* Don't continue past targets which have all the memory.
1520 At one time, this code was necessary to read data from
1521 executables / shared libraries when data for the requested
1522 addresses weren't available in the core file. But now the
1523 core target handles this case itself. */
1524 if (ops->has_all_memory ())
1525 break;
1526
1527 ops = ops->beneath ();
1528 }
1529 while (ops != NULL);
1530
1531 /* The cache works at the raw memory level. Make sure the cache
1532 gets updated with raw contents no matter what kind of memory
1533 object was originally being written. Note we do write-through
1534 first, so that if it fails, we don't write to the cache contents
1535 that never made it to the target. */
1536 if (writebuf != NULL
1537 && inferior_ptid != null_ptid
1538 && target_dcache_init_p ()
1539 && (stack_cache_enabled_p () || code_cache_enabled_p ()))
1540 {
1541 DCACHE *dcache = target_dcache_get ();
1542
1543 /* Note that writing to an area of memory which wasn't present
1544 in the cache doesn't cause it to be loaded in. */
1545 dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
1546 }
1547
1548 return res;
1549 }
1550
1551 /* Perform a partial memory transfer.
1552 For docs see target.h, to_xfer_partial. */
1553
1554 static enum target_xfer_status
1555 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1556 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1557 ULONGEST len, ULONGEST *xfered_len)
1558 {
1559 enum target_xfer_status res;
1560 ULONGEST reg_len;
1561 struct mem_region *region;
1562 struct inferior *inf;
1563
1564 /* For accesses to unmapped overlay sections, read directly from
1565 files. Must do this first, as MEMADDR may need adjustment. */
1566 if (readbuf != NULL && overlay_debugging)
1567 {
1568 struct obj_section *section = find_pc_overlay (memaddr);
1569
1570 if (pc_in_unmapped_range (memaddr, section))
1571 {
1572 const target_section_table *table = target_get_section_table (ops);
1573 const char *section_name = section->the_bfd_section->name;
1574
1575 memaddr = overlay_mapped_address (memaddr, section);
1576
1577 auto match_cb = [=] (const struct target_section *s)
1578 {
1579 return (strcmp (section_name, s->the_bfd_section->name) == 0);
1580 };
1581
1582 return section_table_xfer_memory_partial (readbuf, writebuf,
1583 memaddr, len, xfered_len,
1584 *table, match_cb);
1585 }
1586 }
1587
1588 /* Try the executable files, if "trust-readonly-sections" is set. */
1589 if (readbuf != NULL && trust_readonly)
1590 {
1591 const struct target_section *secp
1592 = target_section_by_addr (ops, memaddr);
1593 if (secp != NULL
1594 && (bfd_section_flags (secp->the_bfd_section) & SEC_READONLY))
1595 {
1596 const target_section_table *table = target_get_section_table (ops);
1597 return section_table_xfer_memory_partial (readbuf, writebuf,
1598 memaddr, len, xfered_len,
1599 *table);
1600 }
1601 }
1602
1603 /* Try GDB's internal data cache. */
1604
1605 if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
1606 &region))
1607 return TARGET_XFER_E_IO;
1608
1609 if (inferior_ptid != null_ptid)
1610 inf = current_inferior ();
1611 else
1612 inf = NULL;
1613
1614 if (inf != NULL
1615 && readbuf != NULL
1616 /* The dcache reads whole cache lines; that doesn't play well
1617 with reading from a trace buffer, because reading outside of
1618 the collected memory range fails. */
1619 && get_traceframe_number () == -1
1620 && (region->attrib.cache
1621 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1622 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1623 {
1624 DCACHE *dcache = target_dcache_get_or_init ();
1625
1626 return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
1627 reg_len, xfered_len);
1628 }
1629
1630 /* If none of those methods found the memory we wanted, fall back
1631 to a target partial transfer. Normally a single call to
1632 to_xfer_partial is enough; if it doesn't recognize an object
1633 it will call the to_xfer_partial of the next target down.
1634 But for memory this won't do. Memory is the only target
1635 object which can be read from more than one valid target.
1636      A core file, for instance, could have some of the memory but
1637 delegate other bits to the target below it. So, we must
1638 manually try all targets. */
1639
1640 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1641 xfered_len);
1642
1643 /* If we still haven't got anything, return the last error. We
1644 give up. */
1645 return res;
1646 }
1647
1648 /* Perform a partial memory transfer. For docs see target.h,
1649 to_xfer_partial. */
1650
1651 static enum target_xfer_status
1652 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1653 gdb_byte *readbuf, const gdb_byte *writebuf,
1654 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1655 {
1656 enum target_xfer_status res;
1657
1658 /* Zero length requests are ok and require no work. */
1659 if (len == 0)
1660 return TARGET_XFER_EOF;
1661
1662 memaddr = address_significant (target_gdbarch (), memaddr);
1663
1664 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1665 breakpoint insns, thus hiding out from higher layers whether
1666 there are software breakpoints inserted in the code stream. */
1667 if (readbuf != NULL)
1668 {
1669 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1670 xfered_len);
1671
1672 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1673 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
1674 }
1675 else
1676 {
1677 /* A large write request is likely to be partially satisfied
1678 by memory_xfer_partial_1. We will continually malloc
1679 and free a copy of the entire write request for breakpoint
1680 shadow handling even though we only end up writing a small
1681 subset of it. Cap writes to a limit specified by the target
1682 to mitigate this. */
1683 len = std::min (ops->get_memory_xfer_limit (), len);
1684
1685 gdb::byte_vector buf (writebuf, writebuf + len);
1686 breakpoint_xfer_memory (NULL, buf.data (), writebuf, memaddr, len);
1687 res = memory_xfer_partial_1 (ops, object, NULL, buf.data (), memaddr, len,
1688 xfered_len);
1689 }
1690
1691 return res;
1692 }
1693
1694 scoped_restore_tmpl<int>
1695 make_scoped_restore_show_memory_breakpoints (int show)
1696 {
1697 return make_scoped_restore (&show_memory_breakpoints, show);
1698 }
1699
1700 /* For docs see target.h, to_xfer_partial. */
1701
1702 enum target_xfer_status
1703 target_xfer_partial (struct target_ops *ops,
1704 enum target_object object, const char *annex,
1705 gdb_byte *readbuf, const gdb_byte *writebuf,
1706 ULONGEST offset, ULONGEST len,
1707 ULONGEST *xfered_len)
1708 {
1709 enum target_xfer_status retval;
1710
1711 /* Transfer is done when LEN is zero. */
1712 if (len == 0)
1713 return TARGET_XFER_EOF;
1714
1715 if (writebuf && !may_write_memory)
1716 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1717 core_addr_to_string_nz (offset), plongest (len));
1718
1719 *xfered_len = 0;
1720
1721 /* If this is a memory transfer, let the memory-specific code
1722 have a look at it instead. Memory transfers are more
1723 complicated. */
1724 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1725 || object == TARGET_OBJECT_CODE_MEMORY)
1726 retval = memory_xfer_partial (ops, object, readbuf,
1727 writebuf, offset, len, xfered_len);
1728 else if (object == TARGET_OBJECT_RAW_MEMORY)
1729 {
1730 /* Skip/avoid accessing the target if the memory region
1731 attributes block the access. Check this here instead of in
1732 raw_memory_xfer_partial as otherwise we'd end up checking
1733 	 this twice in the case where the memory_xfer_partial path is
1734 	 taken; once before checking the dcache, and again in the
1735 tail call to raw_memory_xfer_partial. */
1736 if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
1737 NULL))
1738 return TARGET_XFER_E_IO;
1739
1740 /* Request the normal memory object from other layers. */
1741 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1742 xfered_len);
1743 }
1744 else
1745 retval = ops->xfer_partial (object, annex, readbuf,
1746 writebuf, offset, len, xfered_len);
1747
1748 if (targetdebug)
1749 {
1750 const unsigned char *myaddr = NULL;
1751
1752 fprintf_unfiltered (gdb_stdlog,
1753 "%s:target_xfer_partial "
1754 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1755 ops->shortname (),
1756 (int) object,
1757 (annex ? annex : "(null)"),
1758 host_address_to_string (readbuf),
1759 host_address_to_string (writebuf),
1760 core_addr_to_string_nz (offset),
1761 pulongest (len), retval,
1762 pulongest (*xfered_len));
1763
1764 if (readbuf)
1765 myaddr = readbuf;
1766 if (writebuf)
1767 myaddr = writebuf;
1768 if (retval == TARGET_XFER_OK && myaddr != NULL)
1769 {
1770 int i;
1771
1772 fputs_unfiltered (", bytes =", gdb_stdlog);
1773 for (i = 0; i < *xfered_len; i++)
1774 {
1775 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1776 {
1777 if (targetdebug < 2 && i > 0)
1778 {
1779 fprintf_unfiltered (gdb_stdlog, " ...");
1780 break;
1781 }
1782 fprintf_unfiltered (gdb_stdlog, "\n");
1783 }
1784
1785 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1786 }
1787 }
1788
1789 fputc_unfiltered ('\n', gdb_stdlog);
1790 }
1791
1792   /* Check that implementations of to_xfer_partial update *XFERED_LEN
1793      properly.  Do the assertion after printing debug messages, so that we
1794      can find more clues about an assertion failure in the debug output.  */
1795 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
1796 gdb_assert (*xfered_len > 0);
1797
1798 return retval;
1799 }
1800
1801 /* Read LEN bytes of target memory at address MEMADDR, placing the
1802 results in GDB's memory at MYADDR. Returns either 0 for success or
1803 -1 if any error occurs.
1804
1805 If an error occurs, no guarantee is made about the contents of the data at
1806 MYADDR. In particular, the caller should not depend upon partial reads
1807 filling the buffer with good data. There is no way for the caller to know
1808    how much good data might have been transferred anyway.  Callers that can
1809 deal with partial reads should call target_read (which will retry until
1810 it makes no progress, and then return how much was transferred). */
1811
1812 int
1813 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1814 {
1815 if (target_read (current_inferior ()->top_target (),
1816 TARGET_OBJECT_MEMORY, NULL,
1817 myaddr, memaddr, len) == len)
1818 return 0;
1819 else
1820 return -1;
1821 }
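
/* Example only (guarded out of the build): checking the all-or-nothing
   result of target_read_memory.  The address and buffer size are
   hypothetical.  */
#if 0
static ULONGEST
example_read_inferior_word (CORE_ADDR addr)
{
  gdb_byte buf[8];

  if (target_read_memory (addr, buf, sizeof buf) != 0)
    error (_("Cannot read memory at %s"), core_addr_to_string_nz (addr));

  /* Either all of BUF was filled or an error was thrown above; callers
     that can cope with partial reads should use target_read instead.  */
  return extract_unsigned_integer (buf, sizeof buf,
				   gdbarch_byte_order (target_gdbarch ()));
}
#endif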
1822
1823 /* See target/target.h. */
1824
1825 int
1826 target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
1827 {
1828 gdb_byte buf[4];
1829 int r;
1830
1831 r = target_read_memory (memaddr, buf, sizeof buf);
1832 if (r != 0)
1833 return r;
1834 *result = extract_unsigned_integer (buf, sizeof buf,
1835 gdbarch_byte_order (target_gdbarch ()));
1836 return 0;
1837 }
1838
1839 /* Like target_read_memory, but specify explicitly that this is a read
1840 from the target's raw memory. That is, this read bypasses the
1841 dcache, breakpoint shadowing, etc. */
1842
1843 int
1844 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1845 {
1846 if (target_read (current_inferior ()->top_target (),
1847 TARGET_OBJECT_RAW_MEMORY, NULL,
1848 myaddr, memaddr, len) == len)
1849 return 0;
1850 else
1851 return -1;
1852 }
1853
1854 /* Like target_read_memory, but specify explicitly that this is a read from
1855 the target's stack. This may trigger different cache behavior. */
1856
1857 int
1858 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1859 {
1860 if (target_read (current_inferior ()->top_target (),
1861 TARGET_OBJECT_STACK_MEMORY, NULL,
1862 myaddr, memaddr, len) == len)
1863 return 0;
1864 else
1865 return -1;
1866 }
1867
1868 /* Like target_read_memory, but specify explicitly that this is a read from
1869 the target's code. This may trigger different cache behavior. */
1870
1871 int
1872 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1873 {
1874 if (target_read (current_inferior ()->top_target (),
1875 TARGET_OBJECT_CODE_MEMORY, NULL,
1876 myaddr, memaddr, len) == len)
1877 return 0;
1878 else
1879 return -1;
1880 }
1881
1882 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1883 Returns either 0 for success or -1 if any error occurs. If an
1884 error occurs, no guarantee is made about how much data got written.
1885 Callers that can deal with partial writes should call
1886 target_write. */
1887
1888 int
1889 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1890 {
1891 if (target_write (current_inferior ()->top_target (),
1892 TARGET_OBJECT_MEMORY, NULL,
1893 myaddr, memaddr, len) == len)
1894 return 0;
1895 else
1896 return -1;
1897 }
1898
1899 /* Write LEN bytes from MYADDR to target raw memory at address
1900 MEMADDR. Returns either 0 for success or -1 if any error occurs.
1901 If an error occurs, no guarantee is made about how much data got
1902 written. Callers that can deal with partial writes should call
1903 target_write. */
1904
1905 int
1906 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1907 {
1908 if (target_write (current_inferior ()->top_target (),
1909 TARGET_OBJECT_RAW_MEMORY, NULL,
1910 myaddr, memaddr, len) == len)
1911 return 0;
1912 else
1913 return -1;
1914 }
1915
1916 /* Fetch the target's memory map. */
1917
1918 std::vector<mem_region>
1919 target_memory_map (void)
1920 {
1921 target_ops *target = current_inferior ()->top_target ();
1922 std::vector<mem_region> result = target->memory_map ();
1923 if (result.empty ())
1924 return result;
1925
1926 std::sort (result.begin (), result.end ());
1927
1928 /* Check that regions do not overlap. Simultaneously assign
1929 a numbering for the "mem" commands to use to refer to
1930 each region. */
1931 mem_region *last_one = NULL;
1932 for (size_t ix = 0; ix < result.size (); ix++)
1933 {
1934 mem_region *this_one = &result[ix];
1935 this_one->number = ix;
1936
1937 if (last_one != NULL && last_one->hi > this_one->lo)
1938 {
1939 warning (_("Overlapping regions in memory map: ignoring"));
1940 return std::vector<mem_region> ();
1941 }
1942
1943 last_one = this_one;
1944 }
1945
1946 return result;
1947 }
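
/* Example only (guarded out of the build): walking the memory map
   returned by target_memory_map.  The output format is arbitrary.  */
#if 0
static void
example_dump_memory_map ()
{
  for (const mem_region &r : target_memory_map ())
    printf_filtered ("region %d: %s..%s%s\n", r.number,
		     core_addr_to_string_nz (r.lo),
		     core_addr_to_string_nz (r.hi),
		     r.attrib.mode == MEM_RO ? " (read-only)" : "");
}
#endif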
1948
1949 void
1950 target_flash_erase (ULONGEST address, LONGEST length)
1951 {
1952 current_inferior ()->top_target ()->flash_erase (address, length);
1953 }
1954
1955 void
1956 target_flash_done (void)
1957 {
1958 current_inferior ()->top_target ()->flash_done ();
1959 }
1960
1961 static void
1962 show_trust_readonly (struct ui_file *file, int from_tty,
1963 struct cmd_list_element *c, const char *value)
1964 {
1965 fprintf_filtered (file,
1966 _("Mode for reading from readonly sections is %s.\n"),
1967 value);
1968 }
1969
1970 /* Target vector read/write partial wrapper functions. */
1971
1972 static enum target_xfer_status
1973 target_read_partial (struct target_ops *ops,
1974 enum target_object object,
1975 const char *annex, gdb_byte *buf,
1976 ULONGEST offset, ULONGEST len,
1977 ULONGEST *xfered_len)
1978 {
1979 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1980 xfered_len);
1981 }
1982
1983 static enum target_xfer_status
1984 target_write_partial (struct target_ops *ops,
1985 enum target_object object,
1986 const char *annex, const gdb_byte *buf,
1987 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1988 {
1989 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1990 xfered_len);
1991 }
1992
1993 /* Wrappers to perform the full transfer. */
1994
1995 /* For docs on target_read see target.h. */
1996
1997 LONGEST
1998 target_read (struct target_ops *ops,
1999 enum target_object object,
2000 const char *annex, gdb_byte *buf,
2001 ULONGEST offset, LONGEST len)
2002 {
2003 LONGEST xfered_total = 0;
2004 int unit_size = 1;
2005
2006 /* If we are reading from a memory object, find the length of an addressable
2007 unit for that architecture. */
2008 if (object == TARGET_OBJECT_MEMORY
2009 || object == TARGET_OBJECT_STACK_MEMORY
2010 || object == TARGET_OBJECT_CODE_MEMORY
2011 || object == TARGET_OBJECT_RAW_MEMORY)
2012 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2013
2014 while (xfered_total < len)
2015 {
2016 ULONGEST xfered_partial;
2017 enum target_xfer_status status;
2018
2019 status = target_read_partial (ops, object, annex,
2020 buf + xfered_total * unit_size,
2021 offset + xfered_total, len - xfered_total,
2022 &xfered_partial);
2023
2024 /* Call an observer, notifying them of the xfer progress? */
2025 if (status == TARGET_XFER_EOF)
2026 return xfered_total;
2027 else if (status == TARGET_XFER_OK)
2028 {
2029 xfered_total += xfered_partial;
2030 QUIT;
2031 }
2032 else
2033 return TARGET_XFER_E_IO;
2034
2035 }
2036 return len;
2037 }
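/* Usage sketch (illustrative; BUF, ADDR and LEN are placeholders): the
   return value distinguishes three cases, so a typical caller checks
   it against LEN, e.g.:

     LONGEST n = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                              buf, addr, len);
     if (n == len)
       ... full read ...
     else if (n >= 0)
       ... hit EOF after N units ...
     else
       ... error (TARGET_XFER_E_IO) ...
*/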
2038
2039 /* Assuming that the entire [begin, end) range of memory cannot be
2040 read, try to read whatever subrange is possible to read.
2041
2042 The function returns, in RESULT, either zero or one memory block.
2043 If there's a readable subrange at the beginning, it is completely
2044 read and returned. Any further readable subrange will not be read.
2045 Otherwise, if there's a readable subrange at the end, it will be
2046 completely read and returned. Any readable subranges before it
2047 (obviously, not starting at the beginning) will be ignored. In
2048 other cases -- either no readable subrange, or readable subrange(s)
2049 neither at the beginning nor at the end -- nothing is returned.
2050
2051 The purpose of this function is to handle a read across a boundary
2052 of accessible memory in a case when memory map is not available.
2053 The above restrictions are fine for this case, but will give
2054 incorrect results if the memory is 'patchy'. However, supporting
2055 'patchy' memory would require trying to read every single byte,
2056 and that seems an unacceptable solution. An explicit memory map is
2057 recommended for this case -- target_read_memory_robust will then
2058 take care of reading multiple ranges. */
2059
2060 static void
2061 read_whatever_is_readable (struct target_ops *ops,
2062 const ULONGEST begin, const ULONGEST end,
2063 int unit_size,
2064 std::vector<memory_read_result> *result)
2065 {
2066 ULONGEST current_begin = begin;
2067 ULONGEST current_end = end;
2068 int forward;
2069 ULONGEST xfered_len;
2070
2071 /* If we previously failed to read 1 byte, nothing can be done here. */
2072 if (end - begin <= 1)
2073 return;
2074
2075 gdb::unique_xmalloc_ptr<gdb_byte> buf ((gdb_byte *) xmalloc (end - begin));
2076
2077 /* Check that either the first or the last byte is readable, and give
2078 up if not. This heuristic is meant to permit reading accessible
2079 memory at the boundary of an accessible region. */
2080 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2081 buf.get (), begin, 1, &xfered_len) == TARGET_XFER_OK)
2082 {
2083 forward = 1;
2084 ++current_begin;
2085 }
2086 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2087 buf.get () + (end - begin) - 1, end - 1, 1,
2088 &xfered_len) == TARGET_XFER_OK)
2089 {
2090 forward = 0;
2091 --current_end;
2092 }
2093 else
2094 return;
2095
2096 /* Loop invariant is that the [current_begin, current_end) range was
2097 previously found to be not readable as a whole.
2098
2099 Note loop condition -- if the range has 1 byte, we can't divide the range
2100 so there's no point trying further. */
2101 while (current_end - current_begin > 1)
2102 {
2103 ULONGEST first_half_begin, first_half_end;
2104 ULONGEST second_half_begin, second_half_end;
2105 LONGEST xfer;
2106 ULONGEST middle = current_begin + (current_end - current_begin) / 2;
2107
2108 if (forward)
2109 {
2110 first_half_begin = current_begin;
2111 first_half_end = middle;
2112 second_half_begin = middle;
2113 second_half_end = current_end;
2114 }
2115 else
2116 {
2117 first_half_begin = middle;
2118 first_half_end = current_end;
2119 second_half_begin = current_begin;
2120 second_half_end = middle;
2121 }
2122
2123 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2124 buf.get () + (first_half_begin - begin) * unit_size,
2125 first_half_begin,
2126 first_half_end - first_half_begin);
2127
2128 if (xfer == first_half_end - first_half_begin)
2129 {
2130 /* This half reads up fine. So, the error must be in the
2131 other half. */
2132 current_begin = second_half_begin;
2133 current_end = second_half_end;
2134 }
2135 else
2136 {
2137 /* This half is not readable. Because we've tried one byte, we
2138 know some part of this half is actually readable. Go to the next
2139 iteration to divide again and try to read.
2140
2141 We don't handle the other half, because this function only tries
2142 to read a single readable subrange. */
2143 current_begin = first_half_begin;
2144 current_end = first_half_end;
2145 }
2146 }
2147
2148 if (forward)
2149 {
2150 /* The [begin, current_begin) range has been read. */
2151 result->emplace_back (begin, current_begin, std::move (buf));
2152 }
2153 else
2154 {
2155 /* The [current_end, end) range has been read. */
2156 LONGEST region_len = end - current_end;
2157
2158 gdb::unique_xmalloc_ptr<gdb_byte> data
2159 ((gdb_byte *) xmalloc (region_len * unit_size));
2160 memcpy (data.get (), buf.get () + (current_end - begin) * unit_size,
2161 region_len * unit_size);
2162 result->emplace_back (current_end, end, std::move (data));
2163 }
2164 }
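/* Worked example (illustrative, with made-up addresses): for a request
   covering [0x1000, 0x1400) where only [0x1000, 0x1200) is readable,
   the probe of the first byte succeeds, the bisection walks forward,
   and a single block covering the readable prefix (approximately
   [0x1000, 0x1200)) is appended to RESULT.  Had only the tail been
   readable, the probe of the last byte would succeed instead and the
   single returned block would cover that readable suffix.  */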
2165
2166 std::vector<memory_read_result>
2167 read_memory_robust (struct target_ops *ops,
2168 const ULONGEST offset, const LONGEST len)
2169 {
2170 std::vector<memory_read_result> result;
2171 int unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2172
2173 LONGEST xfered_total = 0;
2174 while (xfered_total < len)
2175 {
2176 struct mem_region *region = lookup_mem_region (offset + xfered_total);
2177 LONGEST region_len;
2178
2179 /* If there is no explicit region, a fake one should be created. */
2180 gdb_assert (region);
2181
2182 if (region->hi == 0)
2183 region_len = len - xfered_total;
2184 else
2185 region_len = region->hi - offset;
2186
2187 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2188 {
2189 /* Cannot read this region. Note that we can end up here only
2190 if the region is explicitly marked inaccessible, or
2191 'inaccessible-by-default' is in effect. */
2192 xfered_total += region_len;
2193 }
2194 else
2195 {
2196 LONGEST to_read = std::min (len - xfered_total, region_len);
2197 gdb::unique_xmalloc_ptr<gdb_byte> buffer
2198 ((gdb_byte *) xmalloc (to_read * unit_size));
2199
2200 LONGEST xfered_partial =
2201 target_read (ops, TARGET_OBJECT_MEMORY, NULL, buffer.get (),
2202 offset + xfered_total, to_read);
2203 /* Call an observer, notifying them of the xfer progress? */
2204 if (xfered_partial <= 0)
2205 {
2206 /* Got an error reading full chunk. See if maybe we can read
2207 some subrange. */
2208 read_whatever_is_readable (ops, offset + xfered_total,
2209 offset + xfered_total + to_read,
2210 unit_size, &result);
2211 xfered_total += to_read;
2212 }
2213 else
2214 {
2215 result.emplace_back (offset + xfered_total,
2216 offset + xfered_total + xfered_partial,
2217 std::move (buffer));
2218 xfered_total += xfered_partial;
2219 }
2220 QUIT;
2221 }
2222 }
2223
2224 return result;
2225 }
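/* Usage sketch (illustrative; field names are those of
   memory_read_result as declared in target.h): callers walk the
   returned blocks, each of which describes one readable chunk, e.g.:

     std::vector<memory_read_result> blocks
       = read_memory_robust (ops, addr, len);
     for (const memory_read_result &r : blocks)
       ... r.begin, r.end and r.data describe one readable range ...
*/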
2226
2227
2228 /* An alternative to target_write with progress callbacks. */
2229
2230 LONGEST
2231 target_write_with_progress (struct target_ops *ops,
2232 enum target_object object,
2233 const char *annex, const gdb_byte *buf,
2234 ULONGEST offset, LONGEST len,
2235 void (*progress) (ULONGEST, void *), void *baton)
2236 {
2237 LONGEST xfered_total = 0;
2238 int unit_size = 1;
2239
2240 /* If we are writing to a memory object, find the length of an addressable
2241 unit for that architecture. */
2242 if (object == TARGET_OBJECT_MEMORY
2243 || object == TARGET_OBJECT_STACK_MEMORY
2244 || object == TARGET_OBJECT_CODE_MEMORY
2245 || object == TARGET_OBJECT_RAW_MEMORY)
2246 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2247
2248 /* Give the progress callback a chance to set up. */
2249 if (progress)
2250 (*progress) (0, baton);
2251
2252 while (xfered_total < len)
2253 {
2254 ULONGEST xfered_partial;
2255 enum target_xfer_status status;
2256
2257 status = target_write_partial (ops, object, annex,
2258 buf + xfered_total * unit_size,
2259 offset + xfered_total, len - xfered_total,
2260 &xfered_partial);
2261
2262 if (status != TARGET_XFER_OK)
2263 return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO;
2264
2265 if (progress)
2266 (*progress) (xfered_partial, baton);
2267
2268 xfered_total += xfered_partial;
2269 QUIT;
2270 }
2271 return len;
2272 }
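/* Usage sketch (illustrative; COUNT_PROGRESS is a hypothetical
   callback): the callback is invoked once with 0 before any transfer,
   and then with the size of each partial transfer, so a baton can
   accumulate the running total, e.g.:

     static void
     count_progress (ULONGEST written, void *baton)
     {
       *(ULONGEST *) baton += written;
     }

     ULONGEST total = 0;
     target_write_with_progress (ops, TARGET_OBJECT_MEMORY, NULL, buf,
                                 offset, len, count_progress, &total);
*/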
2273
2274 /* For docs on target_write see target.h. */
2275
2276 LONGEST
2277 target_write (struct target_ops *ops,
2278 enum target_object object,
2279 const char *annex, const gdb_byte *buf,
2280 ULONGEST offset, LONGEST len)
2281 {
2282 return target_write_with_progress (ops, object, annex, buf, offset, len,
2283 NULL, NULL);
2284 }
2285
2286 /* Help for target_read_alloc and target_read_stralloc. See their comments
2287 for details. */
2288
2289 template <typename T>
2290 gdb::optional<gdb::def_vector<T>>
2291 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2292 const char *annex)
2293 {
2294 gdb::def_vector<T> buf;
2295 size_t buf_pos = 0;
2296 const int chunk = 4096;
2297
2298 /* This function does not have a length parameter; it reads the
2299 entire OBJECT. Also, it doesn't support objects fetched partly
2300 from one target and partly from another (in a different stratum,
2301 e.g. a core file and an executable). Both reasons make it
2302 unsuitable for reading memory. */
2303 gdb_assert (object != TARGET_OBJECT_MEMORY);
2304
2305 /* Start by reading up to 4K at a time. The target will throttle
2306 this number down if necessary. */
2307 while (1)
2308 {
2309 ULONGEST xfered_len;
2310 enum target_xfer_status status;
2311
2312 buf.resize (buf_pos + chunk);
2313
2314 status = target_read_partial (ops, object, annex,
2315 (gdb_byte *) &buf[buf_pos],
2316 buf_pos, chunk,
2317 &xfered_len);
2318
2319 if (status == TARGET_XFER_EOF)
2320 {
2321 /* Read all there was. */
2322 buf.resize (buf_pos);
2323 return buf;
2324 }
2325 else if (status != TARGET_XFER_OK)
2326 {
2327 /* An error occurred. */
2328 return {};
2329 }
2330
2331 buf_pos += xfered_len;
2332
2333 QUIT;
2334 }
2335 }
2336
2337 /* See target.h. */
2338
2339 gdb::optional<gdb::byte_vector>
2340 target_read_alloc (struct target_ops *ops, enum target_object object,
2341 const char *annex)
2342 {
2343 return target_read_alloc_1<gdb_byte> (ops, object, annex);
2344 }
2345
2346 /* See target.h. */
2347
2348 gdb::optional<gdb::char_vector>
2349 target_read_stralloc (struct target_ops *ops, enum target_object object,
2350 const char *annex)
2351 {
2352 gdb::optional<gdb::char_vector> buf
2353 = target_read_alloc_1<char> (ops, object, annex);
2354
2355 if (!buf)
2356 return {};
2357
2358 if (buf->empty () || buf->back () != '\0')
2359 buf->push_back ('\0');
2360
2361 /* Check for embedded NUL bytes; but allow trailing NULs. */
2362 for (auto it = std::find (buf->begin (), buf->end (), '\0');
2363 it != buf->end (); it++)
2364 if (*it != '\0')
2365 {
2366 warning (_("target object %d, annex %s, "
2367 "contained unexpected null characters"),
2368 (int) object, annex ? annex : "(none)");
2369 break;
2370 }
2371
2372 return buf;
2373 }
2374
2375 /* Memory transfer methods. */
2376
2377 void
2378 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2379 LONGEST len)
2380 {
2381 /* This method is used to read from an alternate, non-current
2382 target. This read must bypass the overlay support (as symbols
2383 don't match this target), and GDB's internal cache (wrong cache
2384 for this target). */
2385 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2386 != len)
2387 memory_error (TARGET_XFER_E_IO, addr);
2388 }
2389
2390 ULONGEST
2391 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2392 int len, enum bfd_endian byte_order)
2393 {
2394 gdb_byte buf[sizeof (ULONGEST)];
2395
2396 gdb_assert (len <= sizeof (buf));
2397 get_target_memory (ops, addr, buf, len);
2398 return extract_unsigned_integer (buf, len, byte_order);
2399 }
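/* Usage sketch (illustrative; GDBARCH stands for the relevant
   architecture): reading a 4-byte unsigned value from an alternate
   target, honouring that architecture's byte order:

     ULONGEST val
       = get_target_memory_unsigned (ops, addr, 4,
                                     gdbarch_byte_order (gdbarch));

   LEN must not exceed sizeof (ULONGEST), as asserted above.  */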
2400
2401 /* See target.h. */
2402
2403 int
2404 target_insert_breakpoint (struct gdbarch *gdbarch,
2405 struct bp_target_info *bp_tgt)
2406 {
2407 if (!may_insert_breakpoints)
2408 {
2409 warning (_("May not insert breakpoints"));
2410 return 1;
2411 }
2412
2413 target_ops *target = current_inferior ()->top_target ();
2414
2415 return target->insert_breakpoint (gdbarch, bp_tgt);
2416 }
2417
2418 /* See target.h. */
2419
2420 int
2421 target_remove_breakpoint (struct gdbarch *gdbarch,
2422 struct bp_target_info *bp_tgt,
2423 enum remove_bp_reason reason)
2424 {
2425 /* This is kind of a weird case to handle, but the permission might
2426 have been changed after breakpoints were inserted - in which case
2427 we should just take the user literally and assume that any
2428 breakpoints should be left in place. */
2429 if (!may_insert_breakpoints)
2430 {
2431 warning (_("May not remove breakpoints"));
2432 return 1;
2433 }
2434
2435 target_ops *target = current_inferior ()->top_target ();
2436
2437 return target->remove_breakpoint (gdbarch, bp_tgt, reason);
2438 }
2439
2440 static void
2441 info_target_command (const char *args, int from_tty)
2442 {
2443 int has_all_mem = 0;
2444
2445 if (current_program_space->symfile_object_file != NULL)
2446 {
2447 objfile *objf = current_program_space->symfile_object_file;
2448 printf_filtered (_("Symbols from \"%s\".\n"),
2449 objfile_name (objf));
2450 }
2451
2452 for (target_ops *t = current_inferior ()->top_target ();
2453 t != NULL;
2454 t = t->beneath ())
2455 {
2456 if (!t->has_memory ())
2457 continue;
2458
2459 if ((int) (t->stratum ()) <= (int) dummy_stratum)
2460 continue;
2461 if (has_all_mem)
2462 printf_filtered (_("\tWhile running this, "
2463 "GDB does not access memory from...\n"));
2464 printf_filtered ("%s:\n", t->longname ());
2465 t->files_info ();
2466 has_all_mem = t->has_all_memory ();
2467 }
2468 }
2469
2470 /* This function is called before any new inferior is created, e.g.
2471 by running a program, attaching, or connecting to a target.
2472 It cleans up any state from previous invocations which might
2473 change between runs. This is a subset of what target_preopen
2474 resets (things which might change between targets). */
2475
2476 void
2477 target_pre_inferior (int from_tty)
2478 {
2479 /* Clear out solib state. Otherwise the solib state of the previous
2480 inferior might have survived and is entirely wrong for the new
2481 target. This has been observed on GNU/Linux using glibc 2.3. How
2482 to reproduce:
2483
2484 bash$ ./foo&
2485 [1] 4711
2486 bash$ ./foo&
2487 [1] 4712
2488 bash$ gdb ./foo
2489 [...]
2490 (gdb) attach 4711
2491 (gdb) detach
2492 (gdb) attach 4712
2493 Cannot access memory at address 0xdeadbeef
2494 */
2495
2496 /* In some OSs, the shared library list is the same/global/shared
2497 across inferiors. If code is shared between processes, so are
2498 memory regions and features. */
2499 if (!gdbarch_has_global_solist (target_gdbarch ()))
2500 {
2501 no_shared_libraries (NULL, from_tty);
2502
2503 invalidate_target_mem_regions ();
2504
2505 target_clear_description ();
2506 }
2507
2508 /* attach_flag may be set if the previous process associated with
2509 the inferior was attached to. */
2510 current_inferior ()->attach_flag = 0;
2511
2512 current_inferior ()->highest_thread_num = 0;
2513
2514 agent_capability_invalidate ();
2515 }
2516
2517 /* This is to be called by the open routine before it does
2518 anything. */
2519
2520 void
2521 target_preopen (int from_tty)
2522 {
2523 dont_repeat ();
2524
2525 if (current_inferior ()->pid != 0)
2526 {
2527 if (!from_tty
2528 || !target_has_execution ()
2529 || query (_("A program is being debugged already. Kill it? ")))
2530 {
2531 /* Core inferiors actually should be detached, not
2532 killed. */
2533 if (target_has_execution ())
2534 target_kill ();
2535 else
2536 target_detach (current_inferior (), 0);
2537 }
2538 else
2539 error (_("Program not killed."));
2540 }
2541
2542 /* Calling target_kill may remove the target from the stack. But if
2543 it doesn't (which seems like a win for UDI), remove it now. */
2544 /* Leave the exec target, though. The user may be switching from a
2545 live process to a core of the same program. */
2546 pop_all_targets_above (file_stratum);
2547
2548 target_pre_inferior (from_tty);
2549 }
2550
2551 /* See target.h. */
2552
2553 void
2554 target_detach (inferior *inf, int from_tty)
2555 {
2556 /* After we have detached, we will clear the register cache for this inferior
2557 by calling registers_changed_ptid. We must save the pid_ptid before
2558 detaching, as the target detach method will clear inf->pid. */
2559 ptid_t save_pid_ptid = ptid_t (inf->pid);
2560
2561 /* As long as some to_detach implementations rely on the current_inferior
2562 (either directly, or indirectly, like through target_gdbarch or by
2563 reading memory), INF needs to be the current inferior. Once that
2564 requirement is no longer true, then this assertion can be
2565 removed. */
2566 gdb_assert (inf == current_inferior ());
2567
2568 prepare_for_detach ();
2569
2570 /* Hold a strong reference because detaching may unpush the
2571 target. */
2572 auto proc_target_ref = target_ops_ref::new_reference (inf->process_target ());
2573
2574 current_inferior ()->top_target ()->detach (inf, from_tty);
2575
2576 process_stratum_target *proc_target
2577 = as_process_stratum_target (proc_target_ref.get ());
2578
2579 registers_changed_ptid (proc_target, save_pid_ptid);
2580
2581 /* We have to ensure we have no frame cache left. Normally,
2582 registers_changed_ptid (save_pid_ptid) calls reinit_frame_cache when
2583 inferior_ptid matches save_pid_ptid, but in our case, it does not
2584 call it, as inferior_ptid has been reset. */
2585 reinit_frame_cache ();
2586 }
2587
2588 void
2589 target_disconnect (const char *args, int from_tty)
2590 {
2591 /* If we're in breakpoints-always-inserted mode or if breakpoints
2592 are global across processes, we have to remove them before
2593 disconnecting. */
2594 remove_breakpoints ();
2595
2596 current_inferior ()->top_target ()->disconnect (args, from_tty);
2597 }
2598
2599 /* See target/target.h. */
2600
2601 ptid_t
2602 target_wait (ptid_t ptid, struct target_waitstatus *status,
2603 target_wait_flags options)
2604 {
2605 target_ops *target = current_inferior ()->top_target ();
2606 process_stratum_target *proc_target = current_inferior ()->process_target ();
2607
2608 gdb_assert (!proc_target->commit_resumed_state);
2609
2610 if (!target_can_async_p (target))
2611 gdb_assert ((options & TARGET_WNOHANG) == 0);
2612
2613 try
2614 {
2615 gdb::observers::target_pre_wait.notify (ptid);
2616 ptid_t event_ptid = target->wait (ptid, status, options);
2617 gdb::observers::target_post_wait.notify (event_ptid);
2618 return event_ptid;
2619 }
2620 catch (...)
2621 {
2622 gdb::observers::target_post_wait.notify (null_ptid);
2623 throw;
2624 }
2625 }
2626
2627 /* See target.h. */
2628
2629 ptid_t
2630 default_target_wait (struct target_ops *ops,
2631 ptid_t ptid, struct target_waitstatus *status,
2632 target_wait_flags options)
2633 {
2634 status->set_ignore ();
2635 return minus_one_ptid;
2636 }
2637
2638 std::string
2639 target_pid_to_str (ptid_t ptid)
2640 {
2641 return current_inferior ()->top_target ()->pid_to_str (ptid);
2642 }
2643
2644 const char *
2645 target_thread_name (struct thread_info *info)
2646 {
2647 gdb_assert (info->inf == current_inferior ());
2648
2649 return current_inferior ()->top_target ()->thread_name (info);
2650 }
2651
2652 struct thread_info *
2653 target_thread_handle_to_thread_info (const gdb_byte *thread_handle,
2654 int handle_len,
2655 struct inferior *inf)
2656 {
2657 target_ops *target = current_inferior ()->top_target ();
2658
2659 return target->thread_handle_to_thread_info (thread_handle, handle_len, inf);
2660 }
2661
2662 /* See target.h. */
2663
2664 gdb::byte_vector
2665 target_thread_info_to_thread_handle (struct thread_info *tip)
2666 {
2667 target_ops *target = current_inferior ()->top_target ();
2668
2669 return target->thread_info_to_thread_handle (tip);
2670 }
2671
2672 void
2673 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2674 {
2675 process_stratum_target *curr_target = current_inferior ()->process_target ();
2676 gdb_assert (!curr_target->commit_resumed_state);
2677
2678 target_dcache_invalidate ();
2679
2680 current_inferior ()->top_target ()->resume (ptid, step, signal);
2681
2682 registers_changed_ptid (curr_target, ptid);
2683 /* We only set the internal executing state here. The user/frontend
2684 running state is set at a higher level. This also clears the
2685 thread's stop_pc as a side effect. */
2686 set_executing (curr_target, ptid, true);
2687 clear_inline_frame_state (curr_target, ptid);
2688
2689 if (target_can_async_p ())
2690 target_async (1);
2691 }
2692
2693 /* See target.h. */
2694
2695 void
2696 target_commit_resumed ()
2697 {
2698 gdb_assert (current_inferior ()->process_target ()->commit_resumed_state);
2699 current_inferior ()->top_target ()->commit_resumed ();
2700 }
2701
2702 /* See target.h. */
2703
2704 bool
2705 target_has_pending_events ()
2706 {
2707 return current_inferior ()->top_target ()->has_pending_events ();
2708 }
2709
2710 void
2711 target_pass_signals (gdb::array_view<const unsigned char> pass_signals)
2712 {
2713 current_inferior ()->top_target ()->pass_signals (pass_signals);
2714 }
2715
2716 void
2717 target_program_signals (gdb::array_view<const unsigned char> program_signals)
2718 {
2719 current_inferior ()->top_target ()->program_signals (program_signals);
2720 }
2721
2722 static void
2723 default_follow_fork (struct target_ops *self, inferior *child_inf,
2724 ptid_t child_ptid, target_waitkind fork_kind,
2725 bool follow_child, bool detach_fork)
2726 {
2727 /* Some target returned a fork event, but did not know how to follow it. */
2728 internal_error (__FILE__, __LINE__,
2729 _("could not find a target to follow fork"));
2730 }
2731
2732 /* See target.h. */
2733
2734 void
2735 target_follow_fork (inferior *child_inf, ptid_t child_ptid,
2736 target_waitkind fork_kind, bool follow_child,
2737 bool detach_fork)
2738 {
2739 target_ops *target = current_inferior ()->top_target ();
2740
2741 /* Check consistency between CHILD_INF, CHILD_PTID, FOLLOW_CHILD and
2742 DETACH_FORK. */
2743 if (child_inf != nullptr)
2744 {
2745 gdb_assert (follow_child || !detach_fork);
2746 gdb_assert (child_inf->pid == child_ptid.pid ());
2747 }
2748 else
2749 gdb_assert (!follow_child && detach_fork);
2750
2751 return target->follow_fork (child_inf, child_ptid, fork_kind, follow_child,
2752 detach_fork);
2753 }
2754
2755 /* See target.h. */
2756
2757 void
2758 target_follow_exec (inferior *follow_inf, ptid_t ptid,
2759 const char *execd_pathname)
2760 {
2761 current_inferior ()->top_target ()->follow_exec (follow_inf, ptid,
2762 execd_pathname);
2763 }
2764
2765 static void
2766 default_mourn_inferior (struct target_ops *self)
2767 {
2768 internal_error (__FILE__, __LINE__,
2769 _("could not find a target to mourn the inferior"));
2770 }
2771
2772 void
2773 target_mourn_inferior (ptid_t ptid)
2774 {
2775 gdb_assert (ptid.pid () == inferior_ptid.pid ());
2776 current_inferior ()->top_target ()->mourn_inferior ();
2777
2778 /* We no longer need to keep handles on any of the object files.
2779 Make sure to release them to avoid unnecessarily locking any
2780 of them while we're not actually debugging. */
2781 bfd_cache_close_all ();
2782 }
2783
2784 /* Look for a target which can describe architectural features, starting
2785 from TARGET. If we find one, return its description. */
2786
2787 const struct target_desc *
2788 target_read_description (struct target_ops *target)
2789 {
2790 return target->read_description ();
2791 }
2792
2793
2794 /* Default implementation of memory-searching. */
2795
2796 static int
2797 default_search_memory (struct target_ops *self,
2798 CORE_ADDR start_addr, ULONGEST search_space_len,
2799 const gdb_byte *pattern, ULONGEST pattern_len,
2800 CORE_ADDR *found_addrp)
2801 {
2802 auto read_memory = [=] (CORE_ADDR addr, gdb_byte *result, size_t len)
2803 {
2804 return target_read (current_inferior ()->top_target (),
2805 TARGET_OBJECT_MEMORY, NULL,
2806 result, addr, len) == len;
2807 };
2808
2809 /* Start over from the top of the target stack. */
2810 return simple_search_memory (read_memory, start_addr, search_space_len,
2811 pattern, pattern_len, found_addrp);
2812 }
2813
2814 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2815 sequence of bytes in PATTERN with length PATTERN_LEN.
2816
2817 The result is 1 if found, 0 if not found, and -1 if there was an error
2818 requiring halting of the search (e.g. memory read error).
2819 If the pattern is found the address is recorded in FOUND_ADDRP. */
2820
2821 int
2822 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2823 const gdb_byte *pattern, ULONGEST pattern_len,
2824 CORE_ADDR *found_addrp)
2825 {
2826 target_ops *target = current_inferior ()->top_target ();
2827
2828 return target->search_memory (start_addr, search_space_len, pattern,
2829 pattern_len, found_addrp);
2830 }
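/* Usage sketch (illustrative): searching the next 4 KiB of memory for
   a two-byte pattern.

     static const gdb_byte pattern[] = { 0xde, 0xad };
     CORE_ADDR found;
     if (target_search_memory (start, 4096, pattern,
                               sizeof (pattern), &found) == 1)
       ... FOUND holds the address of the first match ...
*/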
2831
2832 /* Look through the currently pushed targets. If none of them will
2833 be able to restart the currently running process, issue an error
2834 message. */
2835
2836 void
2837 target_require_runnable (void)
2838 {
2839 for (target_ops *t = current_inferior ()->top_target ();
2840 t != NULL;
2841 t = t->beneath ())
2842 {
2843 /* If this target knows how to create a new program, then
2844 assume we will still be able to after killing the current
2845 one. Either killing and mourning will not pop T, or else
2846 find_default_run_target will find it again. */
2847 if (t->can_create_inferior ())
2848 return;
2849
2850 /* Do not worry about targets at certain strata that cannot
2851 create inferiors. Assume they will be pushed again if
2852 necessary, and continue to the process_stratum. */
2853 if (t->stratum () > process_stratum)
2854 continue;
2855
2856 error (_("The \"%s\" target does not support \"run\". "
2857 "Try \"help target\" or \"continue\"."),
2858 t->shortname ());
2859 }
2860
2861 /* This function is only called if the target is running. In that
2862 case there should have been a process_stratum target and it
2863 should either know how to create inferiors, or not... */
2864 internal_error (__FILE__, __LINE__, _("No targets found"));
2865 }
2866
2867 /* Whether GDB is allowed to fall back to the default run target for
2868 "run", "attach", etc. when no target is connected yet. */
2869 static bool auto_connect_native_target = true;
2870
2871 static void
2872 show_auto_connect_native_target (struct ui_file *file, int from_tty,
2873 struct cmd_list_element *c, const char *value)
2874 {
2875 fprintf_filtered (file,
2876 _("Whether GDB may automatically connect to the "
2877 "native target is %s.\n"),
2878 value);
2879 }
2880
2881 /* A pointer to the target that can respond to "run" or "attach".
2882 Native targets are always singletons and instantiated early at GDB
2883 startup. */
2884 static target_ops *the_native_target;
2885
2886 /* See target.h. */
2887
2888 void
2889 set_native_target (target_ops *target)
2890 {
2891 if (the_native_target != NULL)
2892 internal_error (__FILE__, __LINE__,
2893 _("native target already set (\"%s\")."),
2894 the_native_target->longname ());
2895
2896 the_native_target = target;
2897 }
2898
2899 /* See target.h. */
2900
2901 target_ops *
2902 get_native_target ()
2903 {
2904 return the_native_target;
2905 }
2906
2907 /* Look through the list of possible targets for a target that can
2908 execute a run or attach command without any other data. This is
2909 used to locate the default process stratum.
2910
2911 If DO_MESG is not NULL, the result is always valid (error() is
2912 called for errors); else, return NULL on error. */
2913
2914 static struct target_ops *
2915 find_default_run_target (const char *do_mesg)
2916 {
2917 if (auto_connect_native_target && the_native_target != NULL)
2918 return the_native_target;
2919
2920 if (do_mesg != NULL)
2921 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2922 return NULL;
2923 }
2924
2925 /* See target.h. */
2926
2927 struct target_ops *
2928 find_attach_target (void)
2929 {
2930 /* If a target on the current stack can attach, use it. */
2931 for (target_ops *t = current_inferior ()->top_target ();
2932 t != NULL;
2933 t = t->beneath ())
2934 {
2935 if (t->can_attach ())
2936 return t;
2937 }
2938
2939 /* Otherwise, use the default run target for attaching. */
2940 return find_default_run_target ("attach");
2941 }
2942
2943 /* See target.h. */
2944
2945 struct target_ops *
2946 find_run_target (void)
2947 {
2948 /* If a target on the current stack can run, use it. */
2949 for (target_ops *t = current_inferior ()->top_target ();
2950 t != NULL;
2951 t = t->beneath ())
2952 {
2953 if (t->can_create_inferior ())
2954 return t;
2955 }
2956
2957 /* Otherwise, use the default run target. */
2958 return find_default_run_target ("run");
2959 }
2960
2961 bool
2962 target_ops::info_proc (const char *args, enum info_proc_what what)
2963 {
2964 return false;
2965 }
2966
2967 /* Implement the "info proc" command. */
2968
2969 int
2970 target_info_proc (const char *args, enum info_proc_what what)
2971 {
2972 struct target_ops *t;
2973
2974 /* If we're already connected to something that can get us OS
2975 related data, use it. Otherwise, try using the native
2976 target. */
2977 t = find_target_at (process_stratum);
2978 if (t == NULL)
2979 t = find_default_run_target (NULL);
2980
2981 for (; t != NULL; t = t->beneath ())
2982 {
2983 if (t->info_proc (args, what))
2984 {
2985 if (targetdebug)
2986 fprintf_unfiltered (gdb_stdlog,
2987 "target_info_proc (\"%s\", %d)\n", args, what);
2988
2989 return 1;
2990 }
2991 }
2992
2993 return 0;
2994 }
2995
2996 static int
2997 find_default_supports_disable_randomization (struct target_ops *self)
2998 {
2999 struct target_ops *t;
3000
3001 t = find_default_run_target (NULL);
3002 if (t != NULL)
3003 return t->supports_disable_randomization ();
3004 return 0;
3005 }
3006
3007 int
3008 target_supports_disable_randomization (void)
3009 {
3010 return current_inferior ()->top_target ()->supports_disable_randomization ();
3011 }
3012
3013 /* See target/target.h. */
3014
3015 int
3016 target_supports_multi_process (void)
3017 {
3018 return current_inferior ()->top_target ()->supports_multi_process ();
3019 }
3020
3021 /* See target.h. */
3022
3023 gdb::optional<gdb::char_vector>
3024 target_get_osdata (const char *type)
3025 {
3026 struct target_ops *t;
3027
3028 /* If we're already connected to something that can get us OS
3029 related data, use it. Otherwise, try using the native
3030 target. */
3031 t = find_target_at (process_stratum);
3032 if (t == NULL)
3033 t = find_default_run_target ("get OS data");
3034
3035 if (!t)
3036 return {};
3037
3038 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3039 }
3040
3041 /* Determine the current address space of thread PTID. */
3042
3043 struct address_space *
3044 target_thread_address_space (ptid_t ptid)
3045 {
3046 struct address_space *aspace;
3047
3048 aspace = current_inferior ()->top_target ()->thread_address_space (ptid);
3049 gdb_assert (aspace != NULL);
3050
3051 return aspace;
3052 }
3053
3054 /* See target.h. */
3055
3056 target_ops *
3057 target_ops::beneath () const
3058 {
3059 return current_inferior ()->find_target_beneath (this);
3060 }
3061
3062 void
3063 target_ops::close ()
3064 {
3065 }
3066
3067 bool
3068 target_ops::can_attach ()
3069 {
3070 return false;
3071 }
3072
3073 void
3074 target_ops::attach (const char *, int)
3075 {
3076 gdb_assert_not_reached ("target_ops::attach called");
3077 }
3078
3079 bool
3080 target_ops::can_create_inferior ()
3081 {
3082 return false;
3083 }
3084
3085 void
3086 target_ops::create_inferior (const char *, const std::string &,
3087 char **, int)
3088 {
3089 gdb_assert_not_reached ("target_ops::create_inferior called");
3090 }
3091
3092 bool
3093 target_ops::can_run ()
3094 {
3095 return false;
3096 }
3097
3098 int
3099 target_can_run ()
3100 {
3101 for (target_ops *t = current_inferior ()->top_target ();
3102 t != NULL;
3103 t = t->beneath ())
3104 {
3105 if (t->can_run ())
3106 return 1;
3107 }
3108
3109 return 0;
3110 }
3111
3112 /* Target file operations. */
3113
3114 static struct target_ops *
3115 default_fileio_target (void)
3116 {
3117 struct target_ops *t;
3118
3119 /* If we're already connected to something that can perform
3120 file I/O, use it. Otherwise, try using the native target. */
3121 t = find_target_at (process_stratum);
3122 if (t != NULL)
3123 return t;
3124 return find_default_run_target ("file I/O");
3125 }
3126
3127 /* File handle for target file operations. */
3128
3129 struct fileio_fh_t
3130 {
3131 /* The target on which this file is open. NULL if the target was
3132 closed while the handle was still open. */
3133 target_ops *target;
3134
3135 /* The file descriptor on the target. */
3136 int target_fd;
3137
3138 /* Check whether this fileio_fh_t represents a closed file. */
3139 bool is_closed ()
3140 {
3141 return target_fd < 0;
3142 }
3143 };
3144
3145 /* Vector of currently open file handles. The value returned by
3146 target_fileio_open and passed as the FD argument to other
3147 target_fileio_* functions is an index into this vector. This
3148 vector's entries are never freed; instead, files are marked as
3149 closed, and the handle becomes available for reuse. */
3150 static std::vector<fileio_fh_t> fileio_fhandles;
3151
3152 /* Index into fileio_fhandles of the lowest handle that might be
3153 closed. This permits handle reuse without searching the whole
3154 list each time a new file is opened. */
3155 static int lowest_closed_fd;
3156
3157 /* See target.h. */
3158
3159 void
3160 fileio_handles_invalidate_target (target_ops *targ)
3161 {
3162 for (fileio_fh_t &fh : fileio_fhandles)
3163 if (fh.target == targ)
3164 fh.target = NULL;
3165 }
3166
3167 /* Acquire a target fileio file descriptor. */
3168
3169 static int
3170 acquire_fileio_fd (target_ops *target, int target_fd)
3171 {
3172 /* Search for closed handles to reuse. */
3173 for (; lowest_closed_fd < fileio_fhandles.size (); lowest_closed_fd++)
3174 {
3175 fileio_fh_t &fh = fileio_fhandles[lowest_closed_fd];
3176
3177 if (fh.is_closed ())
3178 break;
3179 }
3180
3181 /* Push a new handle if no closed handles were found. */
3182 if (lowest_closed_fd == fileio_fhandles.size ())
3183 fileio_fhandles.push_back (fileio_fh_t {target, target_fd});
3184 else
3185 fileio_fhandles[lowest_closed_fd] = {target, target_fd};
3186
3187 /* Should no longer be marked closed. */
3188 gdb_assert (!fileio_fhandles[lowest_closed_fd].is_closed ());
3189
3190 /* Return its index, and start the next lookup at
3191 the next index. */
3192 return lowest_closed_fd++;
3193 }
3194
3195 /* Release a target fileio file descriptor. */
3196
3197 static void
3198 release_fileio_fd (int fd, fileio_fh_t *fh)
3199 {
3200 fh->target_fd = -1;
3201 lowest_closed_fd = std::min (lowest_closed_fd, fd);
3202 }
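/* Worked example (illustrative): with three open handles 0, 1 and 2,
   closing handle 1 marks it closed and sets lowest_closed_fd to 1.
   The next acquire_fileio_fd call finds slot 1 free, reuses it, and
   leaves lowest_closed_fd at 2 for the following lookup.  */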
3203
3204 /* Return a pointer to the fileio_fhandle_t corresponding to FD. */
3205
3206 static fileio_fh_t *
3207 fileio_fd_to_fh (int fd)
3208 {
3209 return &fileio_fhandles[fd];
3210 }
3211
3212
3213 /* Default implementations of file i/o methods. We don't want these
3214 to delegate automatically, because we need to know which target
3215 supported the method, in order to call it directly from within
3216 pread/pwrite, etc. */
3217
3218 int
3219 target_ops::fileio_open (struct inferior *inf, const char *filename,
3220 int flags, int mode, int warn_if_slow,
3221 int *target_errno)
3222 {
3223 *target_errno = FILEIO_ENOSYS;
3224 return -1;
3225 }
3226
3227 int
3228 target_ops::fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3229 ULONGEST offset, int *target_errno)
3230 {
3231 *target_errno = FILEIO_ENOSYS;
3232 return -1;
3233 }
3234
3235 int
3236 target_ops::fileio_pread (int fd, gdb_byte *read_buf, int len,
3237 ULONGEST offset, int *target_errno)
3238 {
3239 *target_errno = FILEIO_ENOSYS;
3240 return -1;
3241 }
3242
3243 int
3244 target_ops::fileio_fstat (int fd, struct stat *sb, int *target_errno)
3245 {
3246 *target_errno = FILEIO_ENOSYS;
3247 return -1;
3248 }
3249
3250 int
3251 target_ops::fileio_close (int fd, int *target_errno)
3252 {
3253 *target_errno = FILEIO_ENOSYS;
3254 return -1;
3255 }
3256
3257 int
3258 target_ops::fileio_unlink (struct inferior *inf, const char *filename,
3259 int *target_errno)
3260 {
3261 *target_errno = FILEIO_ENOSYS;
3262 return -1;
3263 }
3264
3265 gdb::optional<std::string>
3266 target_ops::fileio_readlink (struct inferior *inf, const char *filename,
3267 int *target_errno)
3268 {
3269 *target_errno = FILEIO_ENOSYS;
3270 return {};
3271 }
3272
3273 /* See target.h. */
3274
3275 int
3276 target_fileio_open (struct inferior *inf, const char *filename,
3277 int flags, int mode, bool warn_if_slow, int *target_errno)
3278 {
3279 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3280 {
3281 int fd = t->fileio_open (inf, filename, flags, mode,
3282 warn_if_slow, target_errno);
3283
3284 if (fd == -1 && *target_errno == FILEIO_ENOSYS)
3285 continue;
3286
3287 if (fd < 0)
3288 fd = -1;
3289 else
3290 fd = acquire_fileio_fd (t, fd);
3291
3292 if (targetdebug)
3293 fprintf_unfiltered (gdb_stdlog,
3294 "target_fileio_open (%d,%s,0x%x,0%o,%d)"
3295 " = %d (%d)\n",
3296 inf == NULL ? 0 : inf->num,
3297 filename, flags, mode,
3298 warn_if_slow, fd,
3299 fd != -1 ? 0 : *target_errno);
3300 return fd;
3301 }
3302
3303 *target_errno = FILEIO_ENOSYS;
3304 return -1;
3305 }
3306
3307 /* See target.h. */
3308
3309 int
3310 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3311 ULONGEST offset, int *target_errno)
3312 {
3313 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3314 int ret = -1;
3315
3316 if (fh->is_closed ())
3317 *target_errno = EBADF;
3318 else if (fh->target == NULL)
3319 *target_errno = EIO;
3320 else
3321 ret = fh->target->fileio_pwrite (fh->target_fd, write_buf,
3322 len, offset, target_errno);
3323
3324 if (targetdebug)
3325 fprintf_unfiltered (gdb_stdlog,
3326 "target_fileio_pwrite (%d,...,%d,%s) "
3327 "= %d (%d)\n",
3328 fd, len, pulongest (offset),
3329 ret, ret != -1 ? 0 : *target_errno);
3330 return ret;
3331 }
3332
3333 /* See target.h. */
3334
3335 int
3336 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3337 ULONGEST offset, int *target_errno)
3338 {
3339 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3340 int ret = -1;
3341
3342 if (fh->is_closed ())
3343 *target_errno = EBADF;
3344 else if (fh->target == NULL)
3345 *target_errno = EIO;
3346 else
3347 ret = fh->target->fileio_pread (fh->target_fd, read_buf,
3348 len, offset, target_errno);
3349
3350 if (targetdebug)
3351 fprintf_unfiltered (gdb_stdlog,
3352 "target_fileio_pread (%d,...,%d,%s) "
3353 "= %d (%d)\n",
3354 fd, len, pulongest (offset),
3355 ret, ret != -1 ? 0 : *target_errno);
3356 return ret;
3357 }
3358
3359 /* See target.h. */
3360
3361 int
3362 target_fileio_fstat (int fd, struct stat *sb, int *target_errno)
3363 {
3364 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3365 int ret = -1;
3366
3367 if (fh->is_closed ())
3368 *target_errno = EBADF;
3369 else if (fh->target == NULL)
3370 *target_errno = EIO;
3371 else
3372 ret = fh->target->fileio_fstat (fh->target_fd, sb, target_errno);
3373
3374 if (targetdebug)
3375 fprintf_unfiltered (gdb_stdlog,
3376 "target_fileio_fstat (%d) = %d (%d)\n",
3377 fd, ret, ret != -1 ? 0 : *target_errno);
3378 return ret;
3379 }
3380
3381 /* See target.h. */
3382
3383 int
3384 target_fileio_close (int fd, int *target_errno)
3385 {
3386 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3387 int ret = -1;
3388
3389 if (fh->is_closed ())
3390 *target_errno = EBADF;
3391 else
3392 {
3393 if (fh->target != NULL)
3394 ret = fh->target->fileio_close (fh->target_fd,
3395 target_errno);
3396 else
3397 ret = 0;
3398 release_fileio_fd (fd, fh);
3399 }
3400
3401 if (targetdebug)
3402 fprintf_unfiltered (gdb_stdlog,
3403 "target_fileio_close (%d) = %d (%d)\n",
3404 fd, ret, ret != -1 ? 0 : *target_errno);
3405 return ret;
3406 }
3407
3408 /* See target.h. */
3409
3410 int
3411 target_fileio_unlink (struct inferior *inf, const char *filename,
3412 int *target_errno)
3413 {
3414 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3415 {
3416 int ret = t->fileio_unlink (inf, filename, target_errno);
3417
3418 if (ret == -1 && *target_errno == FILEIO_ENOSYS)
3419 continue;
3420
3421 if (targetdebug)
3422 fprintf_unfiltered (gdb_stdlog,
3423 "target_fileio_unlink (%d,%s)"
3424 " = %d (%d)\n",
3425 inf == NULL ? 0 : inf->num, filename,
3426 ret, ret != -1 ? 0 : *target_errno);
3427 return ret;
3428 }
3429
3430 *target_errno = FILEIO_ENOSYS;
3431 return -1;
3432 }
3433
3434 /* See target.h. */
3435
3436 gdb::optional<std::string>
3437 target_fileio_readlink (struct inferior *inf, const char *filename,
3438 int *target_errno)
3439 {
3440 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3441 {
3442 gdb::optional<std::string> ret
3443 = t->fileio_readlink (inf, filename, target_errno);
3444
3445 if (!ret.has_value () && *target_errno == FILEIO_ENOSYS)
3446 continue;
3447
3448 if (targetdebug)
3449 fprintf_unfiltered (gdb_stdlog,
3450 "target_fileio_readlink (%d,%s)"
3451 " = %s (%d)\n",
3452 inf == NULL ? 0 : inf->num,
3453 filename, ret ? ret->c_str () : "(nil)",
3454 ret ? 0 : *target_errno);
3455 return ret;
3456 }
3457
3458 *target_errno = FILEIO_ENOSYS;
3459 return {};
3460 }
3461
3462 /* Like scoped_fd, but specific to target fileio. */
3463
3464 class scoped_target_fd
3465 {
3466 public:
3467 explicit scoped_target_fd (int fd) noexcept
3468 : m_fd (fd)
3469 {
3470 }
3471
3472 ~scoped_target_fd ()
3473 {
3474 if (m_fd >= 0)
3475 {
3476 int target_errno;
3477
3478 target_fileio_close (m_fd, &target_errno);
3479 }
3480 }
3481
3482 DISABLE_COPY_AND_ASSIGN (scoped_target_fd);
3483
3484 int get () const noexcept
3485 {
3486 return m_fd;
3487 }
3488
3489 private:
3490 int m_fd;
3491 };
3492
3493 /* Read target file FILENAME, in the filesystem as seen by INF. If
3494 INF is NULL, use the filesystem seen by the debugger (GDB or, for
3495 remote targets, the remote stub). Store the result in *BUF_P and
3496 return the size of the transferred data. PADDING additional bytes
3497 are available in *BUF_P. This is a helper function for
3498 target_fileio_read_alloc; see the declaration of that function for
3499 more information. */
3500
3501 static LONGEST
3502 target_fileio_read_alloc_1 (struct inferior *inf, const char *filename,
3503 gdb_byte **buf_p, int padding)
3504 {
3505 size_t buf_alloc, buf_pos;
3506 gdb_byte *buf;
3507 LONGEST n;
3508 int target_errno;
3509
3510 scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
3511 0700, false, &target_errno));
3512 if (fd.get () == -1)
3513 return -1;
3514
3515 /* Start by reading up to 4K at a time. The target will throttle
3516 this number down if necessary. */
3517 buf_alloc = 4096;
3518 buf = (gdb_byte *) xmalloc (buf_alloc);
3519 buf_pos = 0;
3520 while (1)
3521 {
3522 n = target_fileio_pread (fd.get (), &buf[buf_pos],
3523 buf_alloc - buf_pos - padding, buf_pos,
3524 &target_errno);
3525 if (n < 0)
3526 {
3527 /* An error occurred. */
3528 xfree (buf);
3529 return -1;
3530 }
3531 else if (n == 0)
3532 {
3533 /* Read all there was. */
3534 if (buf_pos == 0)
3535 xfree (buf);
3536 else
3537 *buf_p = buf;
3538 return buf_pos;
3539 }
3540
3541 buf_pos += n;
3542
3543 /* If the buffer is filling up, expand it. */
3544 if (buf_alloc < buf_pos * 2)
3545 {
3546 buf_alloc *= 2;
3547 buf = (gdb_byte *) xrealloc (buf, buf_alloc);
3548 }
3549
3550 QUIT;
3551 }
3552 }
3553
3554 /* See target.h. */
3555
3556 LONGEST
3557 target_fileio_read_alloc (struct inferior *inf, const char *filename,
3558 gdb_byte **buf_p)
3559 {
3560 return target_fileio_read_alloc_1 (inf, filename, buf_p, 0);
3561 }
3562
3563 /* See target.h. */
3564
3565 gdb::unique_xmalloc_ptr<char>
3566 target_fileio_read_stralloc (struct inferior *inf, const char *filename)
3567 {
3568 gdb_byte *buffer;
3569 char *bufstr;
3570 LONGEST i, transferred;
3571
3572 transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1);
3573 bufstr = (char *) buffer;
3574
3575 if (transferred < 0)
3576 return gdb::unique_xmalloc_ptr<char> (nullptr);
3577
3578 if (transferred == 0)
3579 return make_unique_xstrdup ("");
3580
3581 bufstr[transferred] = 0;
3582
3583 /* Check for embedded NUL bytes; but allow trailing NULs. */
3584 for (i = strlen (bufstr); i < transferred; i++)
3585 if (bufstr[i] != 0)
3586 {
3587 warning (_("target file %s "
3588 "contained unexpected null characters"),
3589 filename);
3590 break;
3591 }
3592
3593 return gdb::unique_xmalloc_ptr<char> (bufstr);
3594 }
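/* Usage sketch (illustrative; the file name is hypothetical):

     gdb::unique_xmalloc_ptr<char> text
       = target_fileio_read_stralloc (inf, "/proc/version");
     if (text != nullptr)
       ... text.get () is a NUL-terminated string ...

   A NULL return means the read failed; an empty string means the file
   was present but empty.  */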
3595
3596
3597 static int
3598 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3599 CORE_ADDR addr, int len)
3600 {
3601 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3602 }
3603
3604 static int
3605 default_watchpoint_addr_within_range (struct target_ops *target,
3606 CORE_ADDR addr,
3607 CORE_ADDR start, int length)
3608 {
3609 return addr >= start && addr < start + length;
3610 }
3611
3612 /* See target.h. */
3613
3614 target_ops *
3615 target_stack::find_beneath (const target_ops *t) const
3616 {
3617 /* Look for a non-empty slot at stratum levels beneath T's. */
3618 for (int stratum = t->stratum () - 1; stratum >= 0; --stratum)
3619 if (m_stack[stratum] != NULL)
3620 return m_stack[stratum];
3621
3622 return NULL;
3623 }
3624
3625 /* See target.h. */
3626
3627 struct target_ops *
3628 find_target_at (enum strata stratum)
3629 {
3630 return current_inferior ()->target_at (stratum);
3631 }
3632
3633 \f
3634
3635 /* See target.h. */
3636
3637 void
3638 target_announce_detach (int from_tty)
3639 {
3640 pid_t pid;
3641 const char *exec_file;
3642
3643 if (!from_tty)
3644 return;
3645
3646 pid = inferior_ptid.pid ();
3647 exec_file = get_exec_file (0);
3648 if (exec_file == nullptr)
3649 printf_unfiltered ("Detaching from pid %s\n",
3650 target_pid_to_str (ptid_t (pid)).c_str ());
3651 else
3652 printf_unfiltered (_("Detaching from program: %s, %s\n"), exec_file,
3653 target_pid_to_str (ptid_t (pid)).c_str ());
3654 }
3655
3656 /* See target.h. */
3657
3658 void
3659 target_announce_attach (int from_tty, int pid)
3660 {
3661 if (!from_tty)
3662 return;
3663
3664 const char *exec_file = get_exec_file (0);
3665
3666 if (exec_file != nullptr)
3667 printf_unfiltered ("Attaching to program: %s, %s\n", exec_file,
3668 target_pid_to_str (ptid_t (pid)).c_str ());
3669 else
3670 printf_unfiltered ("Attaching to %s\n",
3671 target_pid_to_str (ptid_t (pid)).c_str ());
3672 }
3673
3674 /* The inferior process has died. Long live the inferior! */
3675
3676 void
3677 generic_mourn_inferior (void)
3678 {
3679 inferior *inf = current_inferior ();
3680
3681 switch_to_no_thread ();
3682
3683 /* Mark breakpoints uninserted in case something tries to delete a
3684 breakpoint while we delete the inferior's threads (which would
3685 fail, since the inferior is long gone). */
3686 mark_breakpoints_out ();
3687
3688 if (inf->pid != 0)
3689 exit_inferior (inf);
3690
3691 /* Note this wipes step-resume breakpoints, so needs to be done
3692 after exit_inferior, which ends up referencing the step-resume
3693 breakpoints through clear_thread_inferior_resources. */
3694 breakpoint_init_inferior (inf_exited);
3695
3696 registers_changed ();
3697
3698 reopen_exec_file ();
3699 reinit_frame_cache ();
3700
3701 if (deprecated_detach_hook)
3702 deprecated_detach_hook ();
3703 }
3704 \f
3705 /* Convert a normal process ID to a string. Returns the result as a
3706 std::string. */
3707
3708 std::string
3709 normal_pid_to_str (ptid_t ptid)
3710 {
3711 return string_printf ("process %d", ptid.pid ());
3712 }
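/* For example, normal_pid_to_str (ptid_t (1234)) returns the string
   "process 1234".  */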
3713
3714 static std::string
3715 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
3716 {
3717 return normal_pid_to_str (ptid);
3718 }
3719
3720 /* Error-catcher for target_find_memory_regions. */
3721 static int
3722 dummy_find_memory_regions (struct target_ops *self,
3723 find_memory_region_ftype ignore1, void *ignore2)
3724 {
3725 error (_("Command not implemented for this target."));
3726 return 0;
3727 }
3728
3729 /* Error-catcher for target_make_corefile_notes. */
3730 static gdb::unique_xmalloc_ptr<char>
3731 dummy_make_corefile_notes (struct target_ops *self,
3732 bfd *ignore1, int *ignore2)
3733 {
3734 error (_("Command not implemented for this target."));
3735 return NULL;
3736 }
3737
3738 #include "target-delegates.c"
3739
3740 /* The initial current target, so that there is always a semi-valid
3741 current target. */
3742
3743 static dummy_target the_dummy_target;
3744
3745 /* See target.h. */
3746
3747 target_ops *
3748 get_dummy_target ()
3749 {
3750 return &the_dummy_target;
3751 }
3752
3753 static const target_info dummy_target_info = {
3754 "None",
3755 N_("None"),
3756 ""
3757 };
3758
3759 strata
3760 dummy_target::stratum () const
3761 {
3762 return dummy_stratum;
3763 }
3764
3765 strata
3766 debug_target::stratum () const
3767 {
3768 return debug_stratum;
3769 }
3770
3771 const target_info &
3772 dummy_target::info () const
3773 {
3774 return dummy_target_info;
3775 }
3776
3777 const target_info &
3778 debug_target::info () const
3779 {
3780 return beneath ()->info ();
3781 }
3782
3783 \f
3784
3785 void
3786 target_close (struct target_ops *targ)
3787 {
3788 for (inferior *inf : all_inferiors ())
3789 gdb_assert (!inf->target_is_pushed (targ));
3790
3791 fileio_handles_invalidate_target (targ);
3792
3793 targ->close ();
3794
3795 if (targetdebug)
3796 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3797 }
3798
3799 int
3800 target_thread_alive (ptid_t ptid)
3801 {
3802 return current_inferior ()->top_target ()->thread_alive (ptid);
3803 }
3804
3805 void
3806 target_update_thread_list (void)
3807 {
3808 current_inferior ()->top_target ()->update_thread_list ();
3809 }
3810
3811 void
3812 target_stop (ptid_t ptid)
3813 {
3814 process_stratum_target *proc_target = current_inferior ()->process_target ();
3815
3816 gdb_assert (!proc_target->commit_resumed_state);
3817
3818 if (!may_stop)
3819 {
3820 warning (_("May not interrupt or stop the target, ignoring attempt"));
3821 return;
3822 }
3823
3824 current_inferior ()->top_target ()->stop (ptid);
3825 }
3826
3827 void
3828 target_interrupt ()
3829 {
3830 if (!may_stop)
3831 {
3832 warning (_("May not interrupt or stop the target, ignoring attempt"));
3833 return;
3834 }
3835
3836 current_inferior ()->top_target ()->interrupt ();
3837 }
3838
3839 /* See target.h. */
3840
3841 void
3842 target_pass_ctrlc (void)
3843 {
3844 /* Pass the Ctrl-C to the first target that has a thread
3845 running. */
3846 for (inferior *inf : all_inferiors ())
3847 {
3848 target_ops *proc_target = inf->process_target ();
3849 if (proc_target == NULL)
3850 continue;
3851
3852 for (thread_info *thr : inf->non_exited_threads ())
3853 {
3854 /* A thread can be THREAD_STOPPED and executing, while
3855 running an infcall. */
3856 if (thr->state == THREAD_RUNNING || thr->executing ())
3857 {
3858 /* We can get here quite deep in target layers. Avoid
3859 switching thread context or anything that would
3860 communicate with the target (e.g., to fetch
3861 registers), or flushing e.g., the frame cache. We
3862 just switch inferior in order to be able to call
3863 through the target_stack. */
3864 scoped_restore_current_inferior restore_inferior;
3865 set_current_inferior (inf);
3866 current_inferior ()->top_target ()->pass_ctrlc ();
3867 return;
3868 }
3869 }
3870 }
3871 }
3872
3873 /* See target.h. */
3874
3875 void
3876 default_target_pass_ctrlc (struct target_ops *ops)
3877 {
3878 target_interrupt ();
3879 }
3880
3881 /* See target/target.h. */
3882
3883 void
3884 target_stop_and_wait (ptid_t ptid)
3885 {
3886 struct target_waitstatus status;
3887 bool was_non_stop = non_stop;
3888
3889 non_stop = true;
3890 target_stop (ptid);
3891
3892 target_wait (ptid, &status, 0);
3893
3894 non_stop = was_non_stop;
3895 }
3896
3897 /* See target/target.h. */
3898
3899 void
3900 target_continue_no_signal (ptid_t ptid)
3901 {
3902 target_resume (ptid, 0, GDB_SIGNAL_0);
3903 }
3904
3905 /* See target/target.h. */
3906
3907 void
3908 target_continue (ptid_t ptid, enum gdb_signal signal)
3909 {
3910 target_resume (ptid, 0, signal);
3911 }
3912
3913 /* Concatenate ELEM to LIST, a comma-separated list. */
3914
3915 static void
3916 str_comma_list_concat_elem (std::string *list, const char *elem)
3917 {
3918 if (!list->empty ())
3919 list->append (", ");
3920
3921 list->append (elem);
3922 }
3923
3924 /* Helper for target_options_to_string. If OPT is present in
3925 TARGET_OPTIONS, append the OPT_STR (string version of OPT) to RET.
3926 OPT is removed from TARGET_OPTIONS. */
3927
3928 static void
3929 do_option (target_wait_flags *target_options, std::string *ret,
3930 target_wait_flag opt, const char *opt_str)
3931 {
3932 if ((*target_options & opt) != 0)
3933 {
3934 str_comma_list_concat_elem (ret, opt_str);
3935 *target_options &= ~opt;
3936 }
3937 }
3938
3939 /* See target.h. */
3940
3941 std::string
3942 target_options_to_string (target_wait_flags target_options)
3943 {
3944 std::string ret;
3945
3946 #define DO_TARG_OPTION(OPT) \
3947 do_option (&target_options, &ret, OPT, #OPT)
3948
3949 DO_TARG_OPTION (TARGET_WNOHANG);
3950
3951 if (target_options != 0)
3952 str_comma_list_concat_elem (&ret, "unknown???");
3953
3954 return ret;
3955 }
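/* For example, target_options_to_string (TARGET_WNOHANG) returns
   "TARGET_WNOHANG"; any remaining bits without a known name are
   reported as "unknown???".  */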
3956
3957 void
3958 target_fetch_registers (struct regcache *regcache, int regno)
3959 {
3960 current_inferior ()->top_target ()->fetch_registers (regcache, regno);
3961 if (targetdebug)
3962 regcache->debug_print_register ("target_fetch_registers", regno);
3963 }
3964
3965 void
3966 target_store_registers (struct regcache *regcache, int regno)
3967 {
3968 if (!may_write_registers)
3969 error (_("Writing to registers is not allowed (regno %d)"), regno);
3970
3971 current_inferior ()->top_target ()->store_registers (regcache, regno);
3972 if (targetdebug)
3973 {
3974 regcache->debug_print_register ("target_store_registers", regno);
3975 }
3976 }
3977
3978 int
3979 target_core_of_thread (ptid_t ptid)
3980 {
3981 return current_inferior ()->top_target ()->core_of_thread (ptid);
3982 }
3983
3984 int
3985 simple_verify_memory (struct target_ops *ops,
3986 const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
3987 {
3988 LONGEST total_xfered = 0;
3989
3990 while (total_xfered < size)
3991 {
3992 ULONGEST xfered_len;
3993 enum target_xfer_status status;
3994 gdb_byte buf[1024];
3995 ULONGEST howmuch = std::min<ULONGEST> (sizeof (buf), size - total_xfered);
3996
3997 status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
3998 buf, NULL, lma + total_xfered, howmuch,
3999 &xfered_len);
4000 if (status == TARGET_XFER_OK
4001 && memcmp (data + total_xfered, buf, xfered_len) == 0)
4002 {
4003 total_xfered += xfered_len;
4004 QUIT;
4005 }
4006 else
4007 return 0;
4008 }
4009 return 1;
4010 }
4011
4012 /* Default implementation of memory verification. */
4013
4014 static int
4015 default_verify_memory (struct target_ops *self,
4016 const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4017 {
4018 /* Start over from the top of the target stack. */
4019 return simple_verify_memory (current_inferior ()->top_target (),
4020 data, memaddr, size);
4021 }
4022
4023 int
4024 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4025 {
4026 target_ops *target = current_inferior ()->top_target ();
4027
4028 return target->verify_memory (data, memaddr, size);
4029 }
4030
4031 /* The documentation for this function is in its prototype declaration in
4032 target.h. */
4033
4034 int
4035 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
4036 enum target_hw_bp_type rw)
4037 {
4038 target_ops *target = current_inferior ()->top_target ();
4039
4040 return target->insert_mask_watchpoint (addr, mask, rw);
4041 }
4042
4043 /* The documentation for this function is in its prototype declaration in
4044 target.h. */
4045
4046 int
4047 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
4048 enum target_hw_bp_type rw)
4049 {
4050 target_ops *target = current_inferior ()->top_target ();
4051
4052 return target->remove_mask_watchpoint (addr, mask, rw);
4053 }
4054
4055 /* The documentation for this function is in its prototype declaration
4056 in target.h. */
4057
4058 int
4059 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4060 {
4061 target_ops *target = current_inferior ()->top_target ();
4062
4063 return target->masked_watch_num_registers (addr, mask);
4064 }
4065
4066 /* The documentation for this function is in its prototype declaration
4067 in target.h. */
4068
4069 int
4070 target_ranged_break_num_registers (void)
4071 {
4072 return current_inferior ()->top_target ()->ranged_break_num_registers ();
4073 }
4074
4075 /* See target.h. */
4076
4077 struct btrace_target_info *
4078 target_enable_btrace (thread_info *tp, const struct btrace_config *conf)
4079 {
4080 return current_inferior ()->top_target ()->enable_btrace (tp, conf);
4081 }
4082
4083 /* See target.h. */
4084
4085 void
4086 target_disable_btrace (struct btrace_target_info *btinfo)
4087 {
4088 current_inferior ()->top_target ()->disable_btrace (btinfo);
4089 }
4090
4091 /* See target.h. */
4092
4093 void
4094 target_teardown_btrace (struct btrace_target_info *btinfo)
4095 {
4096 current_inferior ()->top_target ()->teardown_btrace (btinfo);
4097 }
4098
4099 /* See target.h. */
4100
4101 enum btrace_error
4102 target_read_btrace (struct btrace_data *btrace,
4103 struct btrace_target_info *btinfo,
4104 enum btrace_read_type type)
4105 {
4106 target_ops *target = current_inferior ()->top_target ();
4107
4108 return target->read_btrace (btrace, btinfo, type);
4109 }
4110
4111 /* See target.h. */
4112
4113 const struct btrace_config *
4114 target_btrace_conf (const struct btrace_target_info *btinfo)
4115 {
4116 return current_inferior ()->top_target ()->btrace_conf (btinfo);
4117 }
4118
4119 /* See target.h. */
4120
4121 void
4122 target_stop_recording (void)
4123 {
4124 current_inferior ()->top_target ()->stop_recording ();
4125 }
4126
4127 /* See target.h. */
4128
4129 void
4130 target_save_record (const char *filename)
4131 {
4132 current_inferior ()->top_target ()->save_record (filename);
4133 }
4134
4135 /* See target.h. */
4136
4137 int
4138 target_supports_delete_record ()
4139 {
4140 return current_inferior ()->top_target ()->supports_delete_record ();
4141 }
4142
4143 /* See target.h. */
4144
4145 void
4146 target_delete_record (void)
4147 {
4148 current_inferior ()->top_target ()->delete_record ();
4149 }
4150
4151 /* See target.h. */
4152
4153 enum record_method
4154 target_record_method (ptid_t ptid)
4155 {
4156 return current_inferior ()->top_target ()->record_method (ptid);
4157 }
4158
4159 /* See target.h. */
4160
4161 int
4162 target_record_is_replaying (ptid_t ptid)
4163 {
4164 return current_inferior ()->top_target ()->record_is_replaying (ptid);
4165 }
4166
4167 /* See target.h. */
4168
4169 int
4170 target_record_will_replay (ptid_t ptid, int dir)
4171 {
4172 return current_inferior ()->top_target ()->record_will_replay (ptid, dir);
4173 }
4174
4175 /* See target.h. */
4176
4177 void
4178 target_record_stop_replaying (void)
4179 {
4180 current_inferior ()->top_target ()->record_stop_replaying ();
4181 }
4182
4183 /* See target.h. */
4184
4185 void
4186 target_goto_record_begin (void)
4187 {
4188 current_inferior ()->top_target ()->goto_record_begin ();
4189 }
4190
4191 /* See target.h. */
4192
4193 void
4194 target_goto_record_end (void)
4195 {
4196 current_inferior ()->top_target ()->goto_record_end ();
4197 }
4198
4199 /* See target.h. */
4200
4201 void
4202 target_goto_record (ULONGEST insn)
4203 {
4204 current_inferior ()->top_target ()->goto_record (insn);
4205 }
4206
4207 /* See target.h. */
4208
4209 void
4210 target_insn_history (int size, gdb_disassembly_flags flags)
4211 {
4212 current_inferior ()->top_target ()->insn_history (size, flags);
4213 }
4214
4215 /* See target.h. */
4216
4217 void
4218 target_insn_history_from (ULONGEST from, int size,
4219 gdb_disassembly_flags flags)
4220 {
4221 current_inferior ()->top_target ()->insn_history_from (from, size, flags);
4222 }
4223
4224 /* See target.h. */
4225
4226 void
4227 target_insn_history_range (ULONGEST begin, ULONGEST end,
4228 gdb_disassembly_flags flags)
4229 {
4230 current_inferior ()->top_target ()->insn_history_range (begin, end, flags);
4231 }
4232
4233 /* See target.h. */
4234
4235 void
4236 target_call_history (int size, record_print_flags flags)
4237 {
4238 current_inferior ()->top_target ()->call_history (size, flags);
4239 }
4240
4241 /* See target.h. */
4242
4243 void
4244 target_call_history_from (ULONGEST begin, int size, record_print_flags flags)
4245 {
4246 current_inferior ()->top_target ()->call_history_from (begin, size, flags);
4247 }
4248
4249 /* See target.h. */
4250
4251 void
4252 target_call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
4253 {
4254 current_inferior ()->top_target ()->call_history_range (begin, end, flags);
4255 }
4256
4257 /* See target.h. */
4258
4259 const struct frame_unwind *
4260 target_get_unwinder (void)
4261 {
4262 return current_inferior ()->top_target ()->get_unwinder ();
4263 }
4264
4265 /* See target.h. */
4266
4267 const struct frame_unwind *
4268 target_get_tailcall_unwinder (void)
4269 {
4270 return current_inferior ()->top_target ()->get_tailcall_unwinder ();
4271 }
4272
4273 /* See target.h. */
4274
4275 void
4276 target_prepare_to_generate_core (void)
4277 {
4278 current_inferior ()->top_target ()->prepare_to_generate_core ();
4279 }
4280
4281 /* See target.h. */
4282
4283 void
4284 target_done_generating_core (void)
4285 {
4286 current_inferior ()->top_target ()->done_generating_core ();
4287 }
4288
4289 \f
4290
4291 static char targ_desc[] =
4292 "Names of targets and files being debugged.\nShows the entire \
4293 stack of targets currently in use (including the exec-file,\n\
4294 core-file, and process, if any), as well as the symbol file name.";
4295
4296 static void
4297 default_rcmd (struct target_ops *self, const char *command,
4298 struct ui_file *output)
4299 {
4300 error (_("\"monitor\" command not supported by this target."));
4301 }
4302
4303 static void
4304 do_monitor_command (const char *cmd, int from_tty)
4305 {
4306 target_rcmd (cmd, gdb_stdtarg);
4307 }
4308
4309 /* Erases all the memory regions marked as flash. CMD and FROM_TTY are
4310 ignored. */
4311
4312 void
4313 flash_erase_command (const char *cmd, int from_tty)
4314 {
4315 /* Whether we erased any flash region; used to decide whether to tell
the target that the flash operations are finished. */
4316 bool found_flash_region = false;
4317 struct gdbarch *gdbarch = target_gdbarch ();
4318
4319 std::vector<mem_region> mem_regions = target_memory_map ();
4320
4321 /* Iterate over all memory regions. */
4322 for (const mem_region &m : mem_regions)
4323 {
4324 /* Is this a flash memory region? */
4325 if (m.attrib.mode == MEM_FLASH)
4326 {
4327 found_flash_region = true;
4328 target_flash_erase (m.lo, m.hi - m.lo);
4329
4330 ui_out_emit_tuple tuple_emitter (current_uiout, "erased-regions");
4331
4332 current_uiout->message (_("Erasing flash memory region at address "));
4333 current_uiout->field_core_addr ("address", gdbarch, m.lo);
4334 current_uiout->message (", size = ");
4335 current_uiout->field_string ("size", hex_string (m.hi - m.lo));
4336 current_uiout->message ("\n");
4337 }
4338 }
4339
4340 /* Did we do any flash operations? If so, we need to finalize them. */
4341 if (found_flash_region)
4342 target_flash_done ();
4343 else
4344 current_uiout->message (_("No flash memory regions found.\n"));
4345 }
4346
4347 /* Print the name of each layer of our target stack. */
4348
4349 static void
4350 maintenance_print_target_stack (const char *cmd, int from_tty)
4351 {
4352 printf_filtered (_("The current target stack is:\n"));
4353
4354 for (target_ops *t = current_inferior ()->top_target ();
4355 t != NULL;
4356 t = t->beneath ())
4357 {
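/* Skip the debug target layer; it merely wraps the target beneath
   it to emit "set debug target" logging.  */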
4358 if (t->stratum () == debug_stratum)
4359 continue;
4360 printf_filtered (" - %s (%s)\n", t->shortname (), t->longname ());
4361 }
4362 }
4363
4364 /* See target.h. */
4365
4366 void
4367 target_async (int enable)
4368 {
4369 /* If we are trying to enable async mode then it must be the case that
4370 async mode is possible for this target. */
4371 gdb_assert (!enable || target_can_async_p ());
4372 infrun_async (enable);
4373 current_inferior ()->top_target ()->async (enable);
4374 }
4375
4376 /* See target.h. */
4377
4378 void
4379 target_thread_events (int enable)
4380 {
4381 current_inferior ()->top_target ()->thread_events (enable);
4382 }
4383
4384 /* Controls whether targets can report that they can be, or are, async.
4385 This is just for maintainers to use when debugging gdb. */
4386 bool target_async_permitted = true;
4387
4388 static void
4389 set_maint_target_async (bool permitted)
4390 {
4391 if (have_live_inferiors ())
4392 error (_("Cannot change this setting while the inferior is running."));
4393
4394 target_async_permitted = permitted;
4395 }
4396
4397 static bool
4398 get_maint_target_async ()
4399 {
4400 return target_async_permitted;
4401 }
4402
4403 static void
4404 show_maint_target_async (ui_file *file, int from_tty,
4405 cmd_list_element *c, const char *value)
4406 {
4407 fprintf_filtered (file,
4408 _("Controlling the inferior in "
4409 "asynchronous mode is %s.\n"), value);
4410 }
4411
4412 /* Return true if the target operates in non-stop mode even with "set
4413 non-stop off". */
4414
4415 static int
4416 target_always_non_stop_p (void)
4417 {
4418 return current_inferior ()->top_target ()->always_non_stop_p ();
4419 }
4420
4421 /* See target.h. */
4422
4423 bool
4424 target_is_non_stop_p ()
4425 {
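/* Non-stop mode is in effect if the user enabled it explicitly ("set
   non-stop on"), if "maint set target-non-stop" is "on", or if it is
   "auto" and the target always runs in non-stop mode; in all cases the
   target must also support async execution.  */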
4426 return ((non_stop
4427 || target_non_stop_enabled == AUTO_BOOLEAN_TRUE
4428 || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO
4429 && target_always_non_stop_p ()))
4430 && target_can_async_p ());
4431 }
4432
4433 /* See target.h. */
4434
4435 bool
4436 exists_non_stop_target ()
4437 {
4438 if (target_is_non_stop_p ())
4439 return true;
4440
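/* target_is_non_stop_p looks at the current inferior's target, so
   switch to each inferior in turn; the scoped restore below switches
   back to the original thread afterwards.  */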
4441 scoped_restore_current_thread restore_thread;
4442
4443 for (inferior *inf : all_inferiors ())
4444 {
4445 switch_to_inferior_no_thread (inf);
4446 if (target_is_non_stop_p ())
4447 return true;
4448 }
4449
4450 return false;
4451 }
4452
4453 /* Controls whether targets can report that they always run in non-stop
4454 mode. This is just for maintainers to use when debugging gdb. */
4455 enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO;
4456
4457 /* Set callback for maint target-non-stop setting. */
4458
4459 static void
4460 set_maint_target_non_stop (auto_boolean enabled)
4461 {
4462 if (have_live_inferiors ())
4463 error (_("Cannot change this setting while the inferior is running."));
4464
4465 target_non_stop_enabled = enabled;
4466 }
4467
4468 /* Get callback for maint target-non-stop setting. */
4469
4470 static auto_boolean
4471 get_maint_target_non_stop ()
4472 {
4473 return target_non_stop_enabled;
4474 }
4475
4476 static void
4477 show_maint_target_non_stop (ui_file *file, int from_tty,
4478 cmd_list_element *c, const char *value)
4479 {
4480 if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO)
4481 fprintf_filtered (file,
4482 _("Whether the target is always in non-stop mode "
4483 "is %s (currently %s).\n"), value,
4484 target_always_non_stop_p () ? "on" : "off");
4485 else
4486 fprintf_filtered (file,
4487 _("Whether the target is always in non-stop mode "
4488 "is %s.\n"), value);
4489 }
4490
4491 /* Temporary copies of permission settings. */
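/* The "set may-*" commands operate on these copies rather than on the
   real may_* flags; the set hooks below propagate a copy into the real
   flag, or revert the copy (via update_target_permissions) when the
   change is refused because the inferior is executing.  */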
4492
4493 static bool may_write_registers_1 = true;
4494 static bool may_write_memory_1 = true;
4495 static bool may_insert_breakpoints_1 = true;
4496 static bool may_insert_tracepoints_1 = true;
4497 static bool may_insert_fast_tracepoints_1 = true;
4498 static bool may_stop_1 = true;
4499
4500 /* Make the user-set values match the real values again. */
4501
4502 void
4503 update_target_permissions (void)
4504 {
4505 may_write_registers_1 = may_write_registers;
4506 may_write_memory_1 = may_write_memory;
4507 may_insert_breakpoints_1 = may_insert_breakpoints;
4508 may_insert_tracepoints_1 = may_insert_tracepoints;
4509 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4510 may_stop_1 = may_stop;
4511 }
4512
4513 /* This one function handles (most of) the permission flags in the same
4514 way. */
4515
4516 static void
4517 set_target_permissions (const char *args, int from_tty,
4518 struct cmd_list_element *c)
4519 {
4520 if (target_has_execution ())
4521 {
4522 update_target_permissions ();
4523 error (_("Cannot change this setting while the inferior is running."));
4524 }
4525
4526 /* Make the real values match the user-changed values. */
4527 may_write_registers = may_write_registers_1;
4528 may_insert_breakpoints = may_insert_breakpoints_1;
4529 may_insert_tracepoints = may_insert_tracepoints_1;
4530 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4531 may_stop = may_stop_1;
4532 update_observer_mode ();
4533 }
4534
4535 /* Set memory write permission independently of observer mode. */
4536
4537 static void
4538 set_write_memory_permission (const char *args, int from_tty,
4539 struct cmd_list_element *c)
4540 {
4541 /* Make the real values match the user-changed values. */
4542 may_write_memory = may_write_memory_1;
4543 update_observer_mode ();
4544 }
4545
4546 void _initialize_target ();
4547
4548 void
4549 _initialize_target ()
4550 {
4551 the_debug_target = new debug_target ();
4552
4553 add_info ("target", info_target_command, targ_desc);
4554 add_info ("files", info_target_command, targ_desc);
4555
4556 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
4557 Set target debugging."), _("\
4558 Show target debugging."), _("\
4559 When non-zero, target debugging is enabled. Higher numbers are more\n\
4560 verbose."),
4561 set_targetdebug,
4562 show_targetdebug,
4563 &setdebuglist, &showdebuglist);
4564
4565 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
4566 &trust_readonly, _("\
4567 Set mode for reading from readonly sections."), _("\
4568 Show mode for reading from readonly sections."), _("\
4569 When this mode is on, memory reads from readonly sections (such as .text)\n\
4570 will be read from the object file instead of from the target. This will\n\
4571 result in significant performance improvement for remote targets."),
4572 NULL,
4573 show_trust_readonly,
4574 &setlist, &showlist);
4575
4576 add_com ("monitor", class_obscure, do_monitor_command,
4577 _("Send a command to the remote monitor (remote targets only)."));
4578
4579 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
4580 _("Print the name of each layer of the internal target stack."),
4581 &maintenanceprintlist);
4582
4583 add_setshow_boolean_cmd ("target-async", no_class,
4584 _("\
4585 Set whether gdb controls the inferior in asynchronous mode."), _("\
4586 Show whether gdb controls the inferior in asynchronous mode."), _("\
4587 Tells gdb whether to control the inferior in asynchronous mode."),
4588 set_maint_target_async,
4589 get_maint_target_async,
4590 show_maint_target_async,
4591 &maintenance_set_cmdlist,
4592 &maintenance_show_cmdlist);
4593
4594 add_setshow_auto_boolean_cmd ("target-non-stop", no_class,
4595 _("\
4596 Set whether gdb always controls the inferior in non-stop mode."), _("\
4597 Show whether gdb always controls the inferior in non-stop mode."), _("\
4598 Tells gdb whether to control the inferior in non-stop mode."),
4599 set_maint_target_non_stop,
4600 get_maint_target_non_stop,
4601 show_maint_target_non_stop,
4602 &maintenance_set_cmdlist,
4603 &maintenance_show_cmdlist);
4604
4605 add_setshow_boolean_cmd ("may-write-registers", class_support,
4606 &may_write_registers_1, _("\
4607 Set permission to write into registers."), _("\
4608 Show permission to write into registers."), _("\
4609 When this permission is on, GDB may write into the target's registers.\n\
4610 Otherwise, any sort of write attempt will result in an error."),
4611 set_target_permissions, NULL,
4612 &setlist, &showlist);
4613
4614 add_setshow_boolean_cmd ("may-write-memory", class_support,
4615 &may_write_memory_1, _("\
4616 Set permission to write into target memory."), _("\
4617 Show permission to write into target memory."), _("\
4618 When this permission is on, GDB may write into the target's memory.\n\
4619 Otherwise, any sort of write attempt will result in an error."),
4620 set_write_memory_permission, NULL,
4621 &setlist, &showlist);
4622
4623 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
4624 &may_insert_breakpoints_1, _("\
4625 Set permission to insert breakpoints in the target."), _("\
4626 Show permission to insert breakpoints in the target."), _("\
4627 When this permission is on, GDB may insert breakpoints in the program.\n\
4628 Otherwise, any sort of insertion attempt will result in an error."),
4629 set_target_permissions, NULL,
4630 &setlist, &showlist);
4631
4632 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
4633 &may_insert_tracepoints_1, _("\
4634 Set permission to insert tracepoints in the target."), _("\
4635 Show permission to insert tracepoints in the target."), _("\
4636 When this permission is on, GDB may insert tracepoints in the program.\n\
4637 Otherwise, any sort of insertion attempt will result in an error."),
4638 set_target_permissions, NULL,
4639 &setlist, &showlist);
4640
4641 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
4642 &may_insert_fast_tracepoints_1, _("\
4643 Set permission to insert fast tracepoints in the target."), _("\
4644 Show permission to insert fast tracepoints in the target."), _("\
4645 When this permission is on, GDB may insert fast tracepoints.\n\
4646 Otherwise, any sort of insertion attempt will result in an error."),
4647 set_target_permissions, NULL,
4648 &setlist, &showlist);
4649
4650 add_setshow_boolean_cmd ("may-interrupt", class_support,
4651 &may_stop_1, _("\
4652 Set permission to interrupt or signal the target."), _("\
4653 Show permission to interrupt or signal the target."), _("\
4654 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4655 Otherwise, any attempt to interrupt or stop will be ignored."),
4656 set_target_permissions, NULL,
4657 &setlist, &showlist);
4658
4659 add_com ("flash-erase", no_class, flash_erase_command,
4660 _("Erase all flash memory regions."));
4661
4662 add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
4663 &auto_connect_native_target, _("\
4664 Set whether GDB may automatically connect to the native target."), _("\
4665 Show whether GDB may automatically connect to the native target."), _("\
4666 When on, and GDB is not connected to a target yet, GDB\n\
4667 attempts \"run\" and other commands with the native target."),
4668 NULL, show_auto_connect_native_target,
4669 &setlist, &showlist);
4670 }