]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdbserver/linux-low.cc
gdb, gdbserver: support dlmopen()
[thirdparty/binutils-gdb.git] / gdbserver / linux-low.cc
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2022 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "gdbsupport/agent.h"
23 #include "tdesc.h"
24 #include "gdbsupport/event-loop.h"
25 #include "gdbsupport/event-pipe.h"
26 #include "gdbsupport/rsp-low.h"
27 #include "gdbsupport/signals-state-save-restore.h"
28 #include "nat/linux-nat.h"
29 #include "nat/linux-waitpid.h"
30 #include "gdbsupport/gdb_wait.h"
31 #include "nat/gdb_ptrace.h"
32 #include "nat/linux-ptrace.h"
33 #include "nat/linux-procfs.h"
34 #include "nat/linux-personality.h"
35 #include <signal.h>
36 #include <sys/ioctl.h>
37 #include <fcntl.h>
38 #include <unistd.h>
39 #include <sys/syscall.h>
40 #include <sched.h>
41 #include <ctype.h>
42 #include <pwd.h>
43 #include <sys/types.h>
44 #include <dirent.h>
45 #include <sys/stat.h>
46 #include <sys/vfs.h>
47 #include <sys/uio.h>
48 #include "gdbsupport/filestuff.h"
49 #include "tracepoint.h"
50 #include <inttypes.h>
51 #include "gdbsupport/common-inferior.h"
52 #include "nat/fork-inferior.h"
53 #include "gdbsupport/environ.h"
54 #include "gdbsupport/gdb-sigmask.h"
55 #include "gdbsupport/scoped_restore.h"
56 #ifndef ELFMAG0
57 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
58 then ELFMAG0 will have been defined. If it didn't get included by
59 gdb_proc_service.h then including it will likely introduce a duplicate
60 definition of elf_fpregset_t. */
61 #include <elf.h>
62 #endif
63 #include "nat/linux-namespaces.h"
64
65 #ifndef O_LARGEFILE
66 #define O_LARGEFILE 0
67 #endif
68
69 #ifndef AT_HWCAP2
70 #define AT_HWCAP2 26
71 #endif
72
73 /* Some targets did not define these ptrace constants from the start,
74 so gdbserver defines them locally here. In the future, these may
75 be removed after they are added to asm/ptrace.h. */
76 #if !(defined(PT_TEXT_ADDR) \
77 || defined(PT_DATA_ADDR) \
78 || defined(PT_TEXT_END_ADDR))
79 #if defined(__mcoldfire__)
80 /* These are still undefined in 3.10 kernels. */
81 #define PT_TEXT_ADDR 49*4
82 #define PT_DATA_ADDR 50*4
83 #define PT_TEXT_END_ADDR 51*4
84 /* These are still undefined in 3.10 kernels. */
85 #elif defined(__TMS320C6X__)
86 #define PT_TEXT_ADDR (0x10000*4)
87 #define PT_DATA_ADDR (0x10004*4)
88 #define PT_TEXT_END_ADDR (0x10008*4)
89 #endif
90 #endif
91
92 #if (defined(__UCLIBC__) \
93 && defined(HAS_NOMMU) \
94 && defined(PT_TEXT_ADDR) \
95 && defined(PT_DATA_ADDR) \
96 && defined(PT_TEXT_END_ADDR))
97 #define SUPPORTS_READ_OFFSETS
98 #endif
99
100 #ifdef HAVE_LINUX_BTRACE
101 # include "nat/linux-btrace.h"
102 # include "gdbsupport/btrace-common.h"
103 #endif
104
105 #ifndef HAVE_ELF32_AUXV_T
106 /* Copied from glibc's elf.h. */
107 typedef struct
108 {
109 uint32_t a_type; /* Entry type */
110 union
111 {
112 uint32_t a_val; /* Integer value */
113 /* We use to have pointer elements added here. We cannot do that,
114 though, since it does not work when using 32-bit definitions
115 on 64-bit platforms and vice versa. */
116 } a_un;
117 } Elf32_auxv_t;
118 #endif
119
120 #ifndef HAVE_ELF64_AUXV_T
121 /* Copied from glibc's elf.h. */
122 typedef struct
123 {
124 uint64_t a_type; /* Entry type */
125 union
126 {
127 uint64_t a_val; /* Integer value */
128 /* We use to have pointer elements added here. We cannot do that,
129 though, since it does not work when using 32-bit definitions
130 on 64-bit platforms and vice versa. */
131 } a_un;
132 } Elf64_auxv_t;
133 #endif
134
135 /* Does the current host support PTRACE_GETREGSET? */
136 int have_ptrace_getregset = -1;
137
138 /* Return TRUE if THREAD is the leader thread of the process. */
139
140 static bool
141 is_leader (thread_info *thread)
142 {
143 ptid_t ptid = ptid_of (thread);
144 return ptid.pid () == ptid.lwp ();
145 }
146
/* LWP accessors.  */

/* See nat/linux-nat.h.  Return the ptid of the thread backing LWP.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  Store the arch-private data pointer INFO in
   LWP; the architecture backend owns INFO.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  Return LWP's arch-private data (may be
   NULL if the backend never set it).  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  Non-zero if LWP is currently stopped.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  Return the reason LWP last stopped.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  Non-zero if LWP is being single-stepped.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}
197
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
/* Head of the list of stops already seen for PIDs we do not yet know
   about (consumed by pull_pid_from_list in handle_extended_wait).  */
static struct simple_pid_list *stopped_pids;
214
215 /* Trivial list manipulation functions to keep track of a list of new
216 stopped processes. */
217
218 static void
219 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
220 {
221 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
222
223 new_pid->pid = pid;
224 new_pid->status = status;
225 new_pid->next = *listp;
226 *listp = new_pid;
227 }
228
229 static int
230 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
231 {
232 struct simple_pid_list **p;
233
234 for (p = listp; *p != NULL; p = &(*p)->next)
235 if ((*p)->pid == pid)
236 {
237 struct simple_pid_list *next = (*p)->next;
238
239 *statusp = (*p)->status;
240 xfree (*p);
241 *p = next;
242 return 1;
243 }
244 return 0;
245 }
246
/* The phases of an all-stop operation; see stop_all_lwps.  */

enum stopping_threads_kind
{
  /* Not stopping threads presently.  */
  NOT_STOPPING_THREADS,

  /* Stopping threads.  */
  STOPPING_THREADS,

  /* Stopping and suspending threads.  */
  STOPPING_AND_SUSPENDING_THREADS
};

/* This is set while stop_all_lwps is in effect.  */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* Forward declarations for helpers defined later in this file.  */
static void unsuspend_all_lwps (struct lwp_info *except);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
static ptid_t step_over_bkpt;
280
/* Default implementation: a low target that does not override this
   reports no breakpoint support (get/set PC is unavailable).  */

bool
linux_process_target::low_supports_breakpoints ()
{
  return false;
}

/* Default implementation; only meaningful when
   low_supports_breakpoints returns true.  */

CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  return 0;
}

/* Default implementation; must be overridden by targets that claim
   breakpoint support.  */

void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}

/* Default implementation; must be overridden by targets that use
   software single-step.  */

std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
			  "implemented");
}

/* By default the PC needs no adjustment after a software breakpoint
   trap.  */

int
linux_process_target::low_decr_pc_after_break ()
{
  return 0;
}
311
312 /* True if LWP is stopped in its stepping range. */
313
314 static int
315 lwp_in_step_range (struct lwp_info *lwp)
316 {
317 CORE_ADDR pc = lwp->stop_pc;
318
319 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
320 }
321
/* The event pipe registered as a waitable file in the event loop.  */
static event_pipe linux_event_pipe;

/* True if we're currently in async mode: async mode is on exactly
   while the event pipe is open.  */
#define target_is_async_p() (linux_event_pipe.is_open ())

/* Defined later in this file.  */
static void send_sigstop (struct lwp_info *lwp);
329
/* Check whether HEADER is an ELF image header.  On a valid ELF magic,
   store the e_machine field in *MACHINE and return 1 for ELFCLASS64,
   0 otherwise.  If the magic bytes do not match, set *MACHINE to
   EM_NONE and return -1.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  const unsigned char *ident = header->e_ident;

  if (ident[EI_MAG0] != ELFMAG0
      || ident[EI_MAG1] != ELFMAG1
      || ident[EI_MAG2] != ELFMAG2
      || ident[EI_MAG3] != ELFMAG3)
    {
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return ident[EI_CLASS] == ELFCLASS64;
}
347
348 /* Return non-zero if FILE is a 64-bit ELF file,
349 zero if the file is not a 64-bit ELF file,
350 and -1 if the file is not accessible or doesn't exist. */
351
352 static int
353 elf_64_file_p (const char *file, unsigned int *machine)
354 {
355 Elf64_Ehdr header;
356 int fd;
357
358 fd = open (file, O_RDONLY);
359 if (fd < 0)
360 return -1;
361
362 if (read (fd, &header, sizeof (header)) != sizeof (header))
363 {
364 close (fd);
365 return 0;
366 }
367 close (fd);
368
369 return elf_64_header_p (&header, machine);
370 }
371
372 /* Accepts an integer PID; Returns true if the executable PID is
373 running is a 64-bit ELF file.. */
374
375 int
376 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
377 {
378 char file[PATH_MAX];
379
380 sprintf (file, "/proc/%d/exe", pid);
381 return elf_64_file_p (file, machine);
382 }
383
/* Delete LWP: remove its thread from the thread table, release the
   arch-private data, then free the lwp_info itself.  */

void
linux_process_target::delete_lwp (lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  threads_debug_printf ("deleting %ld", lwpid_of (thr));

  remove_thread (thr);

  low_delete_thread (lwp->arch_private);

  delete lwp;
}

void
linux_process_target::low_delete_thread (arch_lwp_info *info)
{
  /* Default implementation should be overridden if architecture-specific
     info is being used.  */
  gdb_assert (info == nullptr);
}
405
/* Open the /proc/PID/mem file for PROC.  The descriptor is stored in
   PROC's private data and is closed in remove_linux_process.  */

static void
open_proc_mem_file (process_info *proc)
{
  /* Must not already have been opened for this process.  */
  gdb_assert (proc->priv->mem_fd == -1);

  char filename[64];
  xsnprintf (filename, sizeof filename, "/proc/%d/mem", proc->pid);

  proc->priv->mem_fd
    = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
}
419
420 process_info *
421 linux_process_target::add_linux_process_no_mem_file (int pid, int attached)
422 {
423 struct process_info *proc;
424
425 proc = add_process (pid, attached);
426 proc->priv = XCNEW (struct process_info_private);
427
428 proc->priv->arch_private = low_new_process ();
429 proc->priv->mem_fd = -1;
430
431 return proc;
432 }
433
434
/* Like add_linux_process_no_mem_file, but also open the process's
   /proc/PID/mem file.  */

process_info *
linux_process_target::add_linux_process (int pid, int attached)
{
  process_info *proc = add_linux_process_no_mem_file (pid, attached);
  open_proc_mem_file (proc);
  return proc;
}

/* Tear down PROC: close its /proc/PID/mem file (if open), free the
   arch-private and Linux-private data, and remove it from the
   process table.  */

void
linux_process_target::remove_linux_process (process_info *proc)
{
  if (proc->priv->mem_fd >= 0)
    close (proc->priv->mem_fd);

  this->low_delete_process (proc->priv->arch_private);

  xfree (proc->priv);
  proc->priv = nullptr;

  remove_process (proc);
}
456
/* Default implementation: no arch-specific per-process data.  */

arch_process_info *
linux_process_target::low_new_process ()
{
  return nullptr;
}

void
linux_process_target::low_delete_process (arch_process_info *info)
{
  /* Default implementation must be overridden if architecture-specific
     info exists.  */
  gdb_assert (info == nullptr);
}

/* Default implementation: nothing to copy from PARENT to CHILD.  */

void
linux_process_target::low_new_fork (process_info *parent, process_info *child)
{
  /* Nop.  */
}

/* Run the target's arch setup for THREAD, with THREAD temporarily
   made the current thread (restored on return).  */

void
linux_process_target::arch_setup_thread (thread_info *thread)
{
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  low_arch_setup ();
}
485
/* Handle an extended ptrace event (fork/vfork/clone child creation,
   vfork-done, exec) reported for *ORIG_EVENT_LWP with wait status
   WSTAT.  Return 0 if the event should be reported to the client
   (possibly updating *ORIG_EVENT_LWP, as in the exec case), or 1 if
   it was handled internally and should be suppressed.  */

int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
					    int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;

	  /* A fork/vfork child is a new process: pid == lwp id.  */
	  ptid = ptid_t (new_pid, new_pid);

	  threads_debug_printf ("Got fork event from LWP %ld, "
				"new child is %d",
				ptid_of (event_thr).lwp (),
				ptid.pid ());

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = add_linux_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.set_stopped (GDB_SIGNAL_0);

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      threads_debug_printf ("leaving child suspended");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, child will
		 hit it, so uninsert single-step breakpoints from parent
		 (and child).  Once vfork child is done, reinsert
		 them back to parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  /* The child gets its own copy of the parent's tdesc.  */
	  target_desc_up tdesc = allocate_target_description ();
	  copy_target_description (tdesc.get (), parent_proc->tdesc);
	  child_proc->tdesc = tdesc.release ();

	  /* Clone arch-specific process data.  */
	  low_new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.set_forked (ptid);
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.set_vforked (ptid);

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent event is passed on to
	     higher layers.  */
	  event_lwp->fork_relative = child_lwp;
	  child_lwp->fork_relative = event_lwp;

	  /* If the parent thread is doing step-over with single-step
	     breakpoints, the list of single-step breakpoints are cloned
	     from the parent's.  Remove them from the child process.
	     In case of vfork, we'll reinsert them back once vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      /* PTRACE_EVENT_CLONE: a new thread in the same process.  */
      threads_debug_printf
	("Got clone event from LWP %ld, new child is LWP %ld",
	 lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (cs.report_thread_events)
	{
	  new_lwp->waitstatus.set_thread_created ();
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.set_vfork_done ();

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
	{
	  /* Restore the single-step breakpoints removed when the
	     vfork child was created.  */
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      threads_debug_printf ("Got exec event from LWP %ld",
			    lwpid_of (event_thr));

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      switch_to_thread (nullptr);

      /* Create a new process/lwp/thread.  */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.set_execd
	(make_unique_xstrdup
	 (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.set_ignore ();

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
746
/* Fetch LWP's program counter, temporarily switching to its thread.
   Returns 0 when the target does not support breakpoints.  Must not
   be called while LWP's process is still starting up.  */

CORE_ADDR
linux_process_target::get_pc (lwp_info *lwp)
{
  process_info *proc = get_thread_process (get_lwp_thread (lwp));
  gdb_assert (!proc->starting_up);

  if (!low_supports_breakpoints ())
    return 0;

  /* Register reads act on the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  struct regcache *regcache = get_thread_regcache (current_thread, 1);
  CORE_ADDR pc = low_get_pc (regcache);

  threads_debug_printf ("pc is 0x%lx", (long) pc);

  return pc;
}

/* Store in *SYSNO the number of the syscall LWP is stopped at, as
   reported by the arch-specific low_get_syscall_trapinfo.  */

void
linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
{
  struct regcache *regcache;

  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  regcache = get_thread_regcache (current_thread, 1);
  low_get_syscall_trapinfo (regcache, sysno);

  threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
}

void
linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* By default, report an unknown system call number.  */
  *sysno = UNKNOWN_SYSCALL;
}
787
/* Determine why LWP stopped, record it in LWP->stop_reason, and record
   the (possibly breakpoint-adjusted) stop PC in LWP->stop_pc.  Returns
   false if the target has no breakpoint support; returns true
   otherwise, including the starting-up case where we merely claim to
   have the stop PC so the caller doesn't fetch it itself.  */

bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  process_info *proc = get_thread_process (get_lwp_thread (lwp));
  if (proc->starting_up)
    {
      /* Claim we have the stop PC so that the caller doesn't try to
	 fetch it itself.  */
      return true;
    }

  pc = get_pc (lwp);
  /* Where the PC would be after backing out the breakpoint
     instruction, on archs where the trap leaves the PC past it.  */
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      threads_debug_printf
	("%s stopped by software breakpoint",
	 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    threads_debug_printf
      ("%s stopped by hardware breakpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    threads_debug_printf
      ("%s stopped by hardware watchpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    threads_debug_printf
      ("%s stopped by trace",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

  lwp->stop_pc = pc;
  return true;
}
906
/* Allocate an lwp_info for PTID, register its thread in the thread
   table, and give the low target a chance to set up arch data.  */

lwp_info *
linux_process_target::add_lwp (ptid_t ptid)
{
  lwp_info *lwp = new lwp_info;

  lwp->thread = add_thread (ptid, lwp);

  low_new_thread (lwp);

  return lwp;
}

/* Default implementation: no arch-specific per-thread setup.  */

void
linux_process_target::low_new_thread (lwp_info *info)
{
  /* Nop.  */
}
924
/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  Runs in the
   forked child, before it execs the program.  */

static void
linux_ptrace_fun ()
{
  /* Ask to be traced by our parent.  */
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  /* Put the inferior in its own process group.  */
  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      /* open reuses the lowest free descriptor, i.e. fd 0 (stdin).  */
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
956
/* Start an inferior process and returns its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  /* The inner scope ensures address-space-randomization personality
     is restored in the server as soon as the fork is done.  */
  {
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  /* When spawning a new process, we can't open the mem file yet.  We
     still have to nurse the process through the shell, and that execs
     a couple times.  The address space a /proc/PID/mem file is
     accessing is destroyed on exec.  */
  process_info *proc = add_linux_process_no_mem_file (pid, 0);

  ptid = ptid_t (pid, pid);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  /* PROC is now past the shell running the program we want, so we can
     open the /proc/PID/mem file.  */
  open_proc_mem_file (proc);

  return pid;
}
999
/* Implement the post_create_inferior target_ops method.  Runs the
   arch setup and, if still pending, enables the ptrace event-report
   options on the initial LWP.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  low_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}
1018
/* Attach to the LWP specified by PTID with PTRACE_ATTACH and register
   it in the LWP/thread tables.  Returns 0 on success, or the errno
   value if the PTRACE_ATTACH failed.  On success a SIGSTOP for the
   LWP is expected to be consumed later (see the long comment at the
   end).  */

int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      threads_debug_printf ("Attached to a stopped process");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
1099
1100 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1101 already attached. Returns true if a new LWP is found, false
1102 otherwise. */
1103
1104 static int
1105 attach_proc_task_lwp_callback (ptid_t ptid)
1106 {
1107 /* Is this a new thread? */
1108 if (find_thread_ptid (ptid) == NULL)
1109 {
1110 int lwpid = ptid.lwp ();
1111 int err;
1112
1113 threads_debug_printf ("Found new lwp %d", lwpid);
1114
1115 err = the_linux_target->attach_lwp (ptid);
1116
1117 /* Be quiet if we simply raced with the thread exiting. EPERM
1118 is returned if the thread's task still exists, and is marked
1119 as exited or zombie, as well as other conditions, so in that
1120 case, confirm the status in /proc/PID/status. */
1121 if (err == ESRCH
1122 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1123 threads_debug_printf
1124 ("Cannot attach to lwp %d: thread is gone (%d: %s)",
1125 lwpid, err, safe_strerror (err));
1126 else if (err != 0)
1127 {
1128 std::string reason
1129 = linux_ptrace_attach_fail_reason_string (ptid, err);
1130
1131 warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
1132 }
1133
1134 return 1;
1135 }
1136 return 0;
1137 }
1138
1139 static void async_file_mark (void);
1140
1141 /* Attach to PID. If PID is the tgid, attach to it and all
1142 of its threads. */
1143
int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid);
  int err;

  /* Delay opening the /proc/PID/mem file until we've successfully
     attached.  */
  proc = add_linux_process_no_mem_file (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      /* Undo the process registration above; error () throws and
	 does not return.  */
      this->remove_linux_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  open_proc_mem_file (proc);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      /* If the first stop wasn't the expected SIGSTOP, keep the
	 event pending so it is reported to GDB later.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  /* Always succeeds; failures above are reported via error ().  */
  return 0;
}
1217
1218 static int
1219 last_thread_of_process_p (int pid)
1220 {
1221 bool seen_one = false;
1222
1223 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
1224 {
1225 if (!seen_one)
1226 {
1227 /* This is the first thread of this process we see. */
1228 seen_one = true;
1229 return false;
1230 }
1231 else
1232 {
1233 /* This is the second thread of this process we see. */
1234 return true;
1235 }
1236 });
1237
1238 return thread == NULL;
1239 }
1240
1241 /* Kill LWP. */
1242
static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      /* Save errno right away; the debug-print machinery may call
	 functions that clobber it.  */
      int save_errno = errno;

      threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }
}
1284
1285 /* Kill LWP and wait for it to die. */
1286
static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      /* ECHILD here may mean the LWP is a clone child, which on older
	 kernels must be waited for with __WCLONE; retry that way.  */
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1326
1327 /* Callback for `for_each_thread'. Kills an lwp of a given process,
1328 except the leader. */
1329
1330 static void
1331 kill_one_lwp_callback (thread_info *thread, int pid)
1332 {
1333 struct lwp_info *lwp = get_thread_lwp (thread);
1334
1335 /* We avoid killing the first thread here, because of a Linux kernel (at
1336 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1337 the children get a chance to be reaped, it will remain a zombie
1338 forever. */
1339
1340 if (lwpid_of (thread) == pid)
1341 {
1342 threads_debug_printf ("is last of process %s",
1343 target_pid_to_str (thread->id).c_str ());
1344 return;
1345 }
1346
1347 kill_wait_lwp (lwp);
1348 }
1349
int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  /* Kill all non-leader LWPs first; see kill_one_lwp_callback for
     why the leader must be reaped last.  */
  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    threads_debug_printf ("cannot find lwp for pid: %d", pid);
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  /* Always reports success.  */
  return 0;
}
1380
1381 /* Get pending signal of THREAD, for detaching purposes. This is the
1382 signal the thread last stopped for, which we need to deliver to the
1383 thread when detaching, otherwise, it'd be suppressed/lost. */
1384
static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Prefer a status we haven't reported to GDB yet, otherwise fall
     back to the last stop we did report.  */
  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
	  || thread->last_status.sig () == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      threads_debug_printf ("lwp %s had stopped with extended "
			    "status: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  /* Map the host signal to GDB's numbering to consult the pass/nopass
     table below.  */
  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      threads_debug_printf ("lwp %s had signal %s, "
			    "but we don't know if we should pass it. "
			    "Default to not.",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      threads_debug_printf ("lwp %s has pending signal %s: delivering it",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));

      /* Return the host signal number, suitable for PTRACE_DETACH.  */
      return WSTOPSIG (status);
    }
}
1456
/* Detach from LWP, delivering any signal it last stopped for (see
   get_detach_signal), and delete it from our list.  If the LWP turns
   out to be a zombie (e.g., its thread group exited while we were
   detaching other threads), reap it instead of erroring out.  */

void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      threads_debug_printf ("Sending SIGCONT to %s",
			    target_pid_to_str (ptid_of (thread)).c_str ());

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)).c_str (),
		 safe_strerror (save_errno));
	}
    }
  else
    threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
			  target_pid_to_str (ptid_of (thread)).c_str (),
			  strsignal (sig));

  delete_lwp (lwp);
}
1535
/* Detach from process PROCESS and all of its LWPs.  Returns 0.  */

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  /* Now that the non-leaders are gone, detach from the leader.  */
  main_lwp = find_lwp_pid (ptid_t (process->pid));
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1584
1585 /* Remove all LWPs that belong to process PROC from the lwp list. */
1586
1587 void
1588 linux_process_target::mourn (process_info *process)
1589 {
1590 #ifdef USE_THREAD_DB
1591 thread_db_mourn (process);
1592 #endif
1593
1594 for_each_thread (process->pid, [this] (thread_info *thread)
1595 {
1596 delete_lwp (get_thread_lwp (thread));
1597 });
1598
1599 this->remove_linux_process (process);
1600 }
1601
1602 void
1603 linux_process_target::join (int pid)
1604 {
1605 int status, ret;
1606
1607 do {
1608 ret = my_waitpid (pid, &status, 0);
1609 if (WIFEXITED (status) || WIFSIGNALED (status))
1610 break;
1611 } while (ret != -1 || errno != ECHILD);
1612 }
1613
1614 /* Return true if the given thread is still alive. */
1615
1616 bool
1617 linux_process_target::thread_alive (ptid_t ptid)
1618 {
1619 struct lwp_info *lwp = find_lwp_pid (ptid);
1620
1621 /* We assume we always know if a thread exits. If a whole process
1622 exited but we still haven't been able to report it to GDB, we'll
1623 hold on to the last lwp of the dead process. */
1624 if (lwp != NULL)
1625 return !lwp_is_marked_dead (lwp);
1626 else
1627 return 0;
1628 }
1629
/* Return true if THREAD's pending status is still worth reporting to
   GDB.  A pending software/hardware breakpoint stop is discarded if
   the thread has since moved (its PC changed), or, when siginfo-based
   breakpoint identification is unavailable, if the breakpoint that
   caused the stop has since been removed.  */

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      scoped_restore_current_thread restore_thread;
      switch_to_thread (thread);

      /* The thread moved since the breakpoint stop was recorded, so
	 the old event no longer describes its current state.  */
      if (pc != lp->stop_pc)
	{
	  threads_debug_printf ("PC of %ld changed",
				lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      /* Without siginfo we can't tell breakpoint SIGTRAPs apart from
	 other traps, so also check whether the breakpoint that caused
	 the recorded stop still exists.  */
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !low_breakpoint_at (pc))
	{
	  threads_debug_printf ("previous SW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  threads_debug_printf ("previous HW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
#endif

      if (discard)
	{
	  threads_debug_printf ("discarding pending breakpoint status");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1686
1687 /* Returns true if LWP is resumed from the client's perspective. */
1688
1689 static int
1690 lwp_resumed (struct lwp_info *lwp)
1691 {
1692 struct thread_info *thread = get_lwp_thread (lwp);
1693
1694 if (thread->last_resume_kind != resume_stop)
1695 return 1;
1696
1697 /* Did gdb send us a `vCont;t', but we haven't reported the
1698 corresponding stop to gdb yet? If so, the thread is still
1699 resumed/running from gdb's perspective. */
1700 if (thread->last_resume_kind == resume_stop
1701 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
1702 return 1;
1703
1704 return 0;
1705 }
1706
/* Return true if THREAD matches PTID and has a pending status worth
   reporting to GDB.  A stale pending breakpoint event (see
   thread_still_has_status_pending) is discarded and the LWP is
   re-resumed.  */

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
						 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      /* The pending event became stale; resume the LWP the way it
	 was last resumed (stepping or continuing).  */
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
1730
1731 struct lwp_info *
1732 find_lwp_pid (ptid_t ptid)
1733 {
1734 thread_info *thread = find_thread ([&] (thread_info *thr_arg)
1735 {
1736 int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1737 return thr_arg->id.lwp () == lwp;
1738 });
1739
1740 if (thread == NULL)
1741 return NULL;
1742
1743 return get_thread_lwp (thread);
1744 }
1745
1746 /* Return the number of known LWPs in the tgid given by PID. */
1747
1748 static int
1749 num_lwps (int pid)
1750 {
1751 int count = 0;
1752
1753 for_each_thread (pid, [&] (thread_info *thread)
1754 {
1755 count++;
1756 });
1757
1758 return count;
1759 }
1760
1761 /* See nat/linux-nat.h. */
1762
1763 struct lwp_info *
1764 iterate_over_lwps (ptid_t filter,
1765 gdb::function_view<iterate_over_lwps_ftype> callback)
1766 {
1767 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
1768 {
1769 lwp_info *lwp = get_thread_lwp (thr_arg);
1770
1771 return callback (lwp);
1772 });
1773
1774 if (thread == NULL)
1775 return NULL;
1776
1777 return get_thread_lwp (thread);
1778 }
1779
/* For each process, delete the lwp_info of its thread-group leader if
   the leader has become zombie while other threads of the group still
   exist.  Such a leader cannot be debugged (its registers cannot be
   read, etc.) until the non-leaders are reaped; see the long comment
   below for the full rationale.  */

void
linux_process_target::check_zombie_leaders ()
{
  for_each_process ([this] (process_info *proc)
    {
      pid_t leader_pid = pid_of (proc);
      lwp_info *leader_lp = find_lwp_pid (ptid_t (leader_pid));

      threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
			    "num_lwps=%d, zombie=%d",
			    leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
			    linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL && !leader_lp->stopped
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  Note this
	     isn't a watertight check.  If the inferior is
	     multi-threaded and is exiting, it may be we see the
	     leader as zombie before we reap all the non-leader
	     threads.  See comments below.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A zombie leader in a multi-threaded program can mean one
	     of three things:

	     #1 - Only the leader exited, not the whole program, e.g.,
	     with pthread_exit.  Since we can't reap the leader's exit
	     status until all other threads are gone and reaped too,
	     we want to delete the zombie leader right away, as it
	     can't be debugged, we can't read its registers, etc.
	     This is the main reason we check for zombie leaders
	     disappearing.

	     #2 - The whole thread-group/process exited (a group exit,
	     via e.g. exit(3), and there is (or will be shortly) an
	     exit reported for each thread in the process, and then
	     finally an exit for the leader once the non-leaders are
	     reaped.

	     #3 - There are 3 or more threads in the group, and a
	     thread other than the leader exec'd.  See comments on
	     exec events at the top of the file.

	     Ideally we would never delete the leader for case #2.
	     Instead, we want to collect the exit status of each
	     non-leader thread, and then finally collect the exit
	     status of the leader as normal and use its exit code as
	     whole-process exit code.  Unfortunately, there's no
	     race-free way to distinguish cases #1 and #2.  We can't
	     assume the exit events for the non-leaders threads are
	     already pending in the kernel, nor can we assume the
	     non-leader threads are in zombie state already.  Between
	     the leader becoming zombie and the non-leaders exiting
	     and becoming zombie themselves, there's a small time
	     window, so such a check would be racy.  Temporarily
	     pausing all threads and checking to see if all threads
	     exit or not before re-resuming them would work in the
	     case that all threads are running right now, but it
	     wouldn't work if some thread is currently already
	     ptrace-stopped, e.g., due to scheduler-locking.

	     So what we do is we delete the leader anyhow, and then
	     later on when we see its exit status, we re-add it back.
	     We also make sure that we only report a whole-process
	     exit when we see the leader exiting, as opposed to when
	     the last LWP in the LWP list exits, which can be a
	     non-leader if we deleted the leader here.  */
	  threads_debug_printf ("Thread group leader %d zombie "
				"(it exited, or another thread execd), "
				"deleting it.",
				leader_pid);
	  delete_lwp (leader_lp);
	}
    });
}
1856
1857 /* Callback for `find_thread'. Returns the first LWP that is not
1858 stopped. */
1859
1860 static bool
1861 not_stopped_callback (thread_info *thread, ptid_t filter)
1862 {
1863 if (!thread->id.matches (filter))
1864 return false;
1865
1866 lwp_info *lwp = get_thread_lwp (thread);
1867
1868 return !lwp->stopped;
1869 }
1870
1871 /* Increment LWP's suspend count. */
1872
1873 static void
1874 lwp_suspended_inc (struct lwp_info *lwp)
1875 {
1876 lwp->suspended++;
1877
1878 if (lwp->suspended > 4)
1879 threads_debug_printf
1880 ("LWP %ld has a suspiciously high suspend count, suspended=%d",
1881 lwpid_of (get_lwp_thread (lwp)), lwp->suspended);
1882 }
1883
1884 /* Decrement LWP's suspend count. */
1885
1886 static void
1887 lwp_suspended_decr (struct lwp_info *lwp)
1888 {
1889 lwp->suspended--;
1890
1891 if (lwp->suspended < 0)
1892 {
1893 struct thread_info *thread = get_lwp_thread (lwp);
1894
1895 internal_error (__FILE__, __LINE__,
1896 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1897 lwp->suspended);
1898 }
1899 }
1900
1901 /* This function should only be called if the LWP got a SIGTRAP.
1902
1903 Handle any tracepoint steps or hits. Return true if a tracepoint
1904 event was handled, 0 otherwise. */
1905
static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  /* Balance the increment above; the count must be back to zero.  */
  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads
	      || (lwp->collecting_fast_tracepoint
		  != fast_tpoint_collect_result::not_collecting));

  if (tpoint_related_event)
    {
      threads_debug_printf ("got a tracepoint event");
      return 1;
    }

  return 0;
}
1949
/* Return whether LWP is currently in the middle of a fast-tracepoint
   collect, filling in *STATUS with the collect state if so.  Returns
   not_collecting when the thread-area address (which identifies the
   thread to the in-process agent) cannot be obtained.  */

fast_tpoint_collect_result
linux_process_target::linux_fast_tracepoint_collecting
  (lwp_info *lwp, fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
    return fast_tpoint_collect_result::not_collecting;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}
1966
/* Base implementation of the thread-area lookup: always fails with
   -1, signalling that fetching the thread area is unsupported here.
   Presumably overridden by architecture-specific targets that support
   fast tracepoints -- TODO confirm against the low-target headers.  */

int
linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  return -1;
}
1972
/* Decide whether LWP, which stopped with wait status *WSTAT (or with
   no status if WSTAT is NULL), needs to be moved out of a
   fast-tracepoint jump pad before its stop can be reported to GDB.
   Returns true if the LWP is mid-collect and should keep running
   until it exits the jump pad; returns false otherwise, possibly
   after rewinding the PC to the tracepoint address when a synchronous
   fault occurred inside the relocated instruction.  */

bool
linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
{
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  /* Only relevant for non-SIGTRAP stops, and only when fast
     tracepoints are in use with the in-process agent loaded.  */
  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;

      threads_debug_printf
	("Checking whether LWP %ld needs to move out of the jump pad.",
	 lwpid_of (current_thread));

      fast_tpoint_collect_result r
	= linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != fast_tpoint_collect_result::not_collecting)
	    {
	      if (r == fast_tpoint_collect_result::before_insn
		  && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      threads_debug_printf
		("Checking whether LWP %ld needs to move out of the jump pad..."
		 " it does", lwpid_of (current_thread));

	      return true;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint
	    = fast_tpoint_collect_result::not_collecting;

	  if (r != fast_tpoint_collect_result::not_collecting
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread, 1);
	      low_set_pc (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      threads_debug_printf
		("Cancelling fast exit-jump-pad: removing bkpt."
		 "stopping all threads momentarily.");

	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  threads_debug_printf
    ("Checking whether LWP %ld needs to move out of the jump pad... no",
     lwpid_of (current_thread));

  return false;
}
2089
2090 /* Enqueue one signal in the "signals to report later when out of the
2091 jump pad" list. */
2092
2093 static void
2094 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2095 {
2096 struct thread_info *thread = get_lwp_thread (lwp);
2097
2098 threads_debug_printf ("Deferring signal %d for LWP %ld.",
2099 WSTOPSIG (*wstat), lwpid_of (thread));
2100
2101 if (debug_threads)
2102 {
2103 for (const auto &sig : lwp->pending_signals_to_report)
2104 threads_debug_printf (" Already queued %d", sig.signal);
2105
2106 threads_debug_printf (" (no more currently queued signals)");
2107 }
2108
2109 /* Don't enqueue non-RT signals if they are already in the deferred
2110 queue. (SIGSTOP being the easiest signal to see ending up here
2111 twice) */
2112 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2113 {
2114 for (const auto &sig : lwp->pending_signals_to_report)
2115 {
2116 if (sig.signal == WSTOPSIG (*wstat))
2117 {
2118 threads_debug_printf
2119 ("Not requeuing already queued non-RT signal %d for LWP %ld",
2120 sig.signal, lwpid_of (thread));
2121 return;
2122 }
2123 }
2124 }
2125
2126 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
2127
2128 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2129 &lwp->pending_signals_to_report.back ().info);
2130 }
2131
2132 /* Dequeue one signal from the "signals to report later when out of
2133 the jump pad" list. */
2134
2135 static int
2136 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2137 {
2138 struct thread_info *thread = get_lwp_thread (lwp);
2139
2140 if (!lwp->pending_signals_to_report.empty ())
2141 {
2142 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
2143
2144 *wstat = W_STOPCODE (p_sig.signal);
2145 if (p_sig.info.si_signo != 0)
2146 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2147 &p_sig.info);
2148
2149 lwp->pending_signals_to_report.pop_front ();
2150
2151 threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
2152 WSTOPSIG (*wstat), lwpid_of (thread));
2153
2154 if (debug_threads)
2155 {
2156 for (const auto &sig : lwp->pending_signals_to_report)
2157 threads_debug_printf (" Still queued %d", sig.signal);
2158
2159 threads_debug_printf (" (no more queued signals)");
2160 }
2161
2162 return 1;
2163 }
2164
2165 return 0;
2166 }
2167
2168 bool
2169 linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2170 {
2171 scoped_restore_current_thread restore_thread;
2172 switch_to_thread (get_lwp_thread (child));
2173
2174 if (low_stopped_by_watchpoint ())
2175 {
2176 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2177 child->stopped_data_address = low_stopped_data_address ();
2178 }
2179
2180 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2181 }
2182
/* Default implementation of the low_stopped_by_watchpoint hook:
   reports no watchpoint hit.  Architecture backends with hardware
   watchpoint support override this.  */

bool
linux_process_target::low_stopped_by_watchpoint ()
{
  return false;
}
2188
/* Default implementation of the low_stopped_data_address hook:
   no data address available.  Architecture backends with hardware
   watchpoint support override this.  */

CORE_ADDR
linux_process_target::low_stopped_data_address ()
{
  return 0;
}
2194
2195 /* Return the ptrace options that we want to try to enable. */
2196
2197 static int
2198 linux_low_ptrace_options (int attached)
2199 {
2200 client_state &cs = get_client_state ();
2201 int options = 0;
2202
2203 if (!attached)
2204 options |= PTRACE_O_EXITKILL;
2205
2206 if (cs.report_fork_events)
2207 options |= PTRACE_O_TRACEFORK;
2208
2209 if (cs.report_vfork_events)
2210 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2211
2212 if (cs.report_exec_events)
2213 options |= PTRACE_O_TRACEEXEC;
2214
2215 options |= PTRACE_O_TRACESYSGOOD;
2216
2217 return options;
2218 }
2219
/* Process a waitpid status WSTAT just pulled out of the kernel for
   LWPID: record it on the corresponding lwp_info, handling thread
   exit, architecture setup on first stop, ptrace option flags,
   syscall entry/return tracking, extended ptrace events and expected
   SIGSTOPs.  Events that must be reported to GDB are left pending on
   the LWP (status_pending_p); filtered events are consumed here.  */

void
linux_process_target::filter_event (int lwpid, int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (ptid_t (lwpid));

  /* Check for events reported by anything not in our LWP list.  */
  if (child == nullptr)
    {
      if (WIFSTOPPED (wstat))
	{
	  if (WSTOPSIG (wstat) == SIGTRAP
	      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
	    {
	      /* A non-leader thread exec'ed after we've seen the
		 leader zombie, and removed it from our lists (in
		 check_zombie_leaders).  The non-leader thread changes
		 its tid to the tgid.  */
	      threads_debug_printf
		("Re-adding thread group leader LWP %d after exec.",
		 lwpid);

	      child = add_lwp (ptid_t (lwpid, lwpid));
	      child->stopped = 1;
	      switch_to_thread (child->thread);
	    }
	  else
	    {
	      /* A process we are controlling has forked and the new
		 child's stop was reported to us by the kernel.  Save
		 its PID and go back to waiting for the fork event to
		 be reported - the stopped process might be returned
		 from waitpid before or after the fork event is.  */
	      threads_debug_printf
		("Saving LWP %d status %s in stopped_pids list",
		 lwpid, status_to_str (wstat).c_str ());
	      add_to_pid_list (&stopped_pids, lwpid, wstat);
	    }
	}
      else
	{
	  /* Don't report an event for the exit of an LWP not in our
	     list, i.e. not part of any inferior we're debugging.
	     This can happen if we detach from a program we originally
	     forked and then it exits.  However, note that we may have
	     earlier deleted a leader of an inferior we're debugging,
	     in check_zombie_leaders.  Re-add it back here if so.  */
	  find_process ([&] (process_info *proc)
	    {
	      if (proc->pid == lwpid)
		{
		  threads_debug_printf
		    ("Re-adding thread group leader LWP %d after exit.",
		     lwpid);

		  child = add_lwp (ptid_t (lwpid, lwpid));
		  return true;
		}
	      return false;
	    });
	}

      /* Event belongs to no LWP we know or care about; drop it.  */
      if (child == nullptr)
	return;
    }

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      threads_debug_printf ("%d exited", lwpid);

      if (finish_step_over (child))
	{
	  /* Unsuspend all other LWPs, and set them back running again.  */
	  unsuspend_all_lwps (child);
	}

      /* If this is not the leader LWP, then the exit signal was not
	 the end of the debugged application and should be ignored,
	 unless GDB wants to hear about thread exits.  */
      if (cs.report_thread_events || is_leader (thread))
	{
	  /* Since events are serialized to GDB core, and we can't
	     report this one right now.  Leave the status pending for
	     the next time we're able to report it.  */
	  mark_lwp_dead (child, wstat);
	  return;
	}
      else
	{
	  delete_lwp (child);
	  return;
	}
    }

  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return;
	    }
	}
    }

  /* First stop after enabling event reporting was requested: set the
     ptrace options now that the tracee is stopped.  */
  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Always update syscall_state, even if it will be filtered later.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    {
      /* Syscall stops alternate between entry and return.  */
      child->syscall_state
	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	   ? TARGET_WAITKIND_SYSCALL_RETURN
	   : TARGET_WAITKIND_SYSCALL_ENTRY);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in handle_extended_wait.  */
      child->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Be careful to not overwrite stop_pc until save_stop_reason is
     called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (&child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return;
	}
    }

  if (linux_wstatus_maybe_breakpoint (wstat))
    {
      if (save_stop_reason (child))
	have_stop_pc = 1;
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      threads_debug_printf ("Expected stop.");

      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
				target_pid_to_str (ptid_of (thread)).c_str ());
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
				target_pid_to_str (ptid_of (thread)).c_str ());
	  return;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
				child->stepping ? "step" : "continue",
				target_pid_to_str (ptid_of (thread)).c_str ());

	  resume_one_lwp (child, child->stepping, 0, NULL);
	  return;
	}
    }

  /* Anything not filtered above becomes a pending event for the
     core.  */
  child->status_pending_p = 1;
  child->status_pending = wstat;
  return;
}
2439
2440 bool
2441 linux_process_target::maybe_hw_step (thread_info *thread)
2442 {
2443 if (supports_hardware_single_step ())
2444 return true;
2445 else
2446 {
2447 /* GDBserver must insert single-step breakpoint for software
2448 single step. */
2449 gdb_assert (has_single_step_breakpoints (thread));
2450 return false;
2451 }
2452 }
2453
2454 void
2455 linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
2456 {
2457 struct lwp_info *lp = get_thread_lwp (thread);
2458
2459 if (lp->stopped
2460 && !lp->suspended
2461 && !lp->status_pending_p
2462 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
2463 {
2464 int step = 0;
2465
2466 if (thread->last_resume_kind == resume_step)
2467 step = maybe_hw_step (thread);
2468
2469 threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
2470 target_pid_to_str (ptid_of (thread)).c_str (),
2471 paddress (lp->stop_pc), step);
2472
2473 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2474 }
2475 }
2476
/* Wait for an event from any LWP in WAIT_PTID's set, but only report
   events matching FILTER_PTID; events from other LWPs are left
   pending on their lwp_info.  The raw waitpid status is stored in
   *WSTATP; OPTIONS are waitpid options (e.g. __WALL, WNOHANG).
   Returns the LWP id of the event thread (which is made current),
   0 if WNOHANG was given and no event was found, or -1 if no
   unwaited-for LWP remains in WAIT_PTID's set.  */

int
linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
					       ptid_t filter_ptid,
					       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
    {
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  threads_debug_printf ("Got a pending child %ld", lwpid_of (event_thread));
	}
    }
  else if (filter_ptid != null_ptid)
    {
      requested_child = find_lwp_pid (filter_ptid);

      /* A pending status on a thread still collecting a fast
	 tracepoint must be deferred and the thread resumed so it can
	 get out of the jump pad first.  */
      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && (requested_child->collecting_fast_tracepoint
	      != fast_tpoint_collect_result::not_collecting))
	{
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error (__FILE__, __LINE__,
			  "requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      threads_debug_printf ("Got an event from pending child %ld (%04x)",
			    lwpid_of (event_thread),
			    event_child->status_pending);

      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      switch_to_thread (event_thread);
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
			    ret, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  threads_debug_printf ("waitpid %ld received %s",
				(long) ret, status_to_str (*wstatp).c_str ());

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_thread ([this] (thread_info *thread)
	  {
	    resume_stopped_resumed_lwps (thread);
	  });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      auto not_stopped = [&] (thread_info *thread)
	{
	  return not_stopped_callback (thread, wait_ptid);
	};

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if (find_thread (not_stopped) == NULL)
	{
	  threads_debug_printf ("exit (no unwaited-for LWP)");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  threads_debug_printf ("WNOHANG set, no event found");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      threads_debug_printf ("sigsuspend'ing");

      sigsuspend (&prev_mask);
      gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);

  switch_to_thread (event_thread);

  return lwpid_of (event_thread);
}
2674
/* Wait for an event from any of PTID's LWPs with no separate filter:
   every event in PTID's set is eligible for reporting.  See
   wait_for_event_filtered for the meaning of WSTATP, OPTIONS and the
   return value.  */

int
linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  return wait_for_event_filtered (ptid, ptid, wstatp, options);
}
2680
2681 /* Select one LWP out of those that have events pending. */
2682
2683 static void
2684 select_event_lwp (struct lwp_info **orig_lp)
2685 {
2686 struct thread_info *event_thread = NULL;
2687
2688 /* In all-stop, give preference to the LWP that is being
2689 single-stepped. There will be at most one, and it's the LWP that
2690 the core is most interested in. If we didn't do this, then we'd
2691 have to handle pending step SIGTRAPs somehow in case the core
2692 later continues the previously-stepped thread, otherwise we'd
2693 report the pending SIGTRAP, and the core, not having stepped the
2694 thread, wouldn't understand what the trap was for, and therefore
2695 would report it to the user as a random signal. */
2696 if (!non_stop)
2697 {
2698 event_thread = find_thread ([] (thread_info *thread)
2699 {
2700 lwp_info *lp = get_thread_lwp (thread);
2701
2702 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2703 && thread->last_resume_kind == resume_step
2704 && lp->status_pending_p);
2705 });
2706
2707 if (event_thread != NULL)
2708 threads_debug_printf
2709 ("Select single-step %s",
2710 target_pid_to_str (ptid_of (event_thread)).c_str ());
2711 }
2712 if (event_thread == NULL)
2713 {
2714 /* No single-stepping LWP. Select one at random, out of those
2715 which have had events. */
2716
2717 event_thread = find_thread_in_random ([&] (thread_info *thread)
2718 {
2719 lwp_info *lp = get_thread_lwp (thread);
2720
2721 /* Only resumed LWPs that have an event pending. */
2722 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2723 && lp->status_pending_p);
2724 });
2725 }
2726
2727 if (event_thread != NULL)
2728 {
2729 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2730
2731 /* Switch the event LWP. */
2732 *orig_lp = event_lp;
2733 }
2734 }
2735
2736 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2737 NULL. */
2738
2739 static void
2740 unsuspend_all_lwps (struct lwp_info *except)
2741 {
2742 for_each_thread ([&] (thread_info *thread)
2743 {
2744 lwp_info *lwp = get_thread_lwp (thread);
2745
2746 if (lwp != except)
2747 lwp_suspended_decr (lwp);
2748 });
2749 }
2750
2751 static bool lwp_running (thread_info *thread);
2752
2753 /* Stabilize threads (move out of jump pads).
2754
2755 If a thread is midway collecting a fast tracepoint, we need to
2756 finish the collection and move it out of the jump pad before
2757 reporting the signal.
2758
2759 This avoids recursion while collecting (when a signal arrives
2760 midway, and the signal handler itself collects), which would trash
2761 the trace buffer. In case the user set a breakpoint in a signal
2762 handler, this avoids the backtrace showing the jump pad, etc..
2763 Most importantly, there are certain things we can't do safely if
2764 threads are stopped in a jump pad (or in its callee's). For
2765 example:
2766
2767 - starting a new trace run. A thread still collecting the
2768 previous run, could trash the trace buffer when resumed. The trace
2769 buffer control structures would have been reset but the thread had
2770 no way to tell. The thread could even midway memcpy'ing to the
2771 buffer, which would mean that when resumed, it would clobber the
2772 trace buffer that had been set for a new run.
2773
2774 - we can't rewrite/reuse the jump pads for new tracepoints
2775 safely. Say you do tstart while a thread is stopped midway while
2776 collecting. When the thread is later resumed, it finishes the
2777 collection, and returns to the jump pad, to execute the original
2778 instruction that was under the tracepoint jump at the time the
2779 older run had been started. If the jump pad had been rewritten
2780 since for something else in the new run, the thread would now
2781 execute the wrong / random instructions. */
2782
/* See the comment above for the rationale.  Kicks every thread out of
   its fast tracepoint jump pad, waiting (with suspend-locking) until
   all are stable, and deferring any interesting signals caught along
   the way for later reporting.  */

void
linux_process_target::stabilize_threads ()
{
  thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
    {
      return stuck_in_jump_pad (thread);
    });

  /* A thread stopped at a breakpoint/signal inside the jump pad can't
     be moved out; give up rather than corrupting state.  */
  if (thread_stuck != NULL)
    {
      threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
			    lwpid_of (thread_stuck));
      return;
    }

  scoped_restore_current_thread restore_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread ([this] (thread_info *thread)
    {
      move_out_of_jump_pad (thread);
    });

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait event loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  */
	  lwp_suspended_inc (lwp);

	  /* Remember signals the user should see once stabilization
	     is done.  */
	  if (ourstatus.sig () != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  if (debug_threads)
    {
      thread_stuck = find_thread ([this] (thread_info *thread)
	{
	  return stuck_in_jump_pad (thread);
	});

      if (thread_stuck != NULL)
	threads_debug_printf
	  ("couldn't stabilize, LWP %ld got stuck in jump pad",
	   lwpid_of (thread_stuck));
    }
}
2853
2854 /* Convenience function that is called when the kernel reports an
2855 event that is not passed out to GDB. */
2856
2857 static ptid_t
2858 ignore_event (struct target_waitstatus *ourstatus)
2859 {
2860 /* If we got an event, there may still be others, as a single
2861 SIGCHLD can indicate more than one child stopped. This forces
2862 another target_wait call. */
2863 async_file_mark ();
2864
2865 ourstatus->set_ignore ();
2866 return null_ptid;
2867 }
2868
2869 ptid_t
2870 linux_process_target::filter_exit_event (lwp_info *event_child,
2871 target_waitstatus *ourstatus)
2872 {
2873 client_state &cs = get_client_state ();
2874 struct thread_info *thread = get_lwp_thread (event_child);
2875 ptid_t ptid = ptid_of (thread);
2876
2877 if (!is_leader (thread))
2878 {
2879 if (cs.report_thread_events)
2880 ourstatus->set_thread_exited (0);
2881 else
2882 ourstatus->set_ignore ();
2883
2884 delete_lwp (event_child);
2885 }
2886 return ptid;
2887 }
2888
2889 /* Returns 1 if GDB is interested in any event_child syscalls. */
2890
2891 static int
2892 gdb_catching_syscalls_p (struct lwp_info *event_child)
2893 {
2894 struct thread_info *thread = get_lwp_thread (event_child);
2895 struct process_info *proc = get_thread_process (thread);
2896
2897 return !proc->syscalls_to_catch.empty ();
2898 }
2899
2900 bool
2901 linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
2902 {
2903 int sysno;
2904 struct thread_info *thread = get_lwp_thread (event_child);
2905 struct process_info *proc = get_thread_process (thread);
2906
2907 if (proc->syscalls_to_catch.empty ())
2908 return false;
2909
2910 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2911 return true;
2912
2913 get_syscall_trapinfo (event_child, &sysno);
2914
2915 for (int iter : proc->syscalls_to_catch)
2916 if (iter == sysno)
2917 return true;
2918
2919 return false;
2920 }
2921
2922 ptid_t
2923 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
2924 target_wait_flags target_options)
2925 {
2926 THREADS_SCOPED_DEBUG_ENTER_EXIT;
2927
2928 client_state &cs = get_client_state ();
2929 int w;
2930 struct lwp_info *event_child;
2931 int options;
2932 int pid;
2933 int step_over_finished;
2934 int bp_explains_trap;
2935 int maybe_internal_trap;
2936 int report_to_gdb;
2937 int trace_event;
2938 int in_step_range;
2939 int any_resumed;
2940
2941 threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());
2942
2943 /* Translate generic target options into linux options. */
2944 options = __WALL;
2945 if (target_options & TARGET_WNOHANG)
2946 options |= WNOHANG;
2947
2948 bp_explains_trap = 0;
2949 trace_event = 0;
2950 in_step_range = 0;
2951 ourstatus->set_ignore ();
2952
2953 auto status_pending_p_any = [&] (thread_info *thread)
2954 {
2955 return status_pending_p_callback (thread, minus_one_ptid);
2956 };
2957
2958 auto not_stopped = [&] (thread_info *thread)
2959 {
2960 return not_stopped_callback (thread, minus_one_ptid);
2961 };
2962
2963 /* Find a resumed LWP, if any. */
2964 if (find_thread (status_pending_p_any) != NULL)
2965 any_resumed = 1;
2966 else if (find_thread (not_stopped) != NULL)
2967 any_resumed = 1;
2968 else
2969 any_resumed = 0;
2970
2971 if (step_over_bkpt == null_ptid)
2972 pid = wait_for_event (ptid, &w, options);
2973 else
2974 {
2975 threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
2976 target_pid_to_str (step_over_bkpt).c_str ());
2977 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2978 }
2979
2980 if (pid == 0 || (pid == -1 && !any_resumed))
2981 {
2982 gdb_assert (target_options & TARGET_WNOHANG);
2983
2984 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");
2985
2986 ourstatus->set_ignore ();
2987 return null_ptid;
2988 }
2989 else if (pid == -1)
2990 {
2991 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");
2992
2993 ourstatus->set_no_resumed ();
2994 return null_ptid;
2995 }
2996
2997 event_child = get_thread_lwp (current_thread);
2998
2999 /* wait_for_event only returns an exit status for the last
3000 child of a process. Report it. */
3001 if (WIFEXITED (w) || WIFSIGNALED (w))
3002 {
3003 if (WIFEXITED (w))
3004 {
3005 ourstatus->set_exited (WEXITSTATUS (w));
3006
3007 threads_debug_printf
3008 ("ret = %s, exited with retcode %d",
3009 target_pid_to_str (ptid_of (current_thread)).c_str (),
3010 WEXITSTATUS (w));
3011 }
3012 else
3013 {
3014 ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));
3015
3016 threads_debug_printf
3017 ("ret = %s, terminated with signal %d",
3018 target_pid_to_str (ptid_of (current_thread)).c_str (),
3019 WTERMSIG (w));
3020 }
3021
3022 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
3023 return filter_exit_event (event_child, ourstatus);
3024
3025 return ptid_of (current_thread);
3026 }
3027
3028 /* If step-over executes a breakpoint instruction, in the case of a
3029 hardware single step it means a gdb/gdbserver breakpoint had been
3030 planted on top of a permanent breakpoint, in the case of a software
3031 single step it may just mean that gdbserver hit the reinsert breakpoint.
3032 The PC has been adjusted by save_stop_reason to point at
3033 the breakpoint address.
3034 So in the case of the hardware single step advance the PC manually
3035 past the breakpoint and in the case of software single step advance only
3036 if it's not the single_step_breakpoint we are hitting.
3037 This avoids that a program would keep trapping a permanent breakpoint
3038 forever. */
3039 if (step_over_bkpt != null_ptid
3040 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3041 && (event_child->stepping
3042 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3043 {
3044 int increment_pc = 0;
3045 int breakpoint_kind = 0;
3046 CORE_ADDR stop_pc = event_child->stop_pc;
3047
3048 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3049 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3050
3051 threads_debug_printf
3052 ("step-over for %s executed software breakpoint",
3053 target_pid_to_str (ptid_of (current_thread)).c_str ());
3054
3055 if (increment_pc != 0)
3056 {
3057 struct regcache *regcache
3058 = get_thread_regcache (current_thread, 1);
3059
3060 event_child->stop_pc += increment_pc;
3061 low_set_pc (regcache, event_child->stop_pc);
3062
3063 if (!low_breakpoint_at (event_child->stop_pc))
3064 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3065 }
3066 }
3067
3068 /* If this event was not handled before, and is not a SIGTRAP, we
3069 report it. SIGILL and SIGSEGV are also treated as traps in case
3070 a breakpoint is inserted at the current PC. If this target does
3071 not support internal breakpoints at all, we also report the
3072 SIGTRAP without further processing; it's of no concern to us. */
3073 maybe_internal_trap
3074 = (low_supports_breakpoints ()
3075 && (WSTOPSIG (w) == SIGTRAP
3076 || ((WSTOPSIG (w) == SIGILL
3077 || WSTOPSIG (w) == SIGSEGV)
3078 && low_breakpoint_at (event_child->stop_pc))));
3079
3080 if (maybe_internal_trap)
3081 {
3082 /* Handle anything that requires bookkeeping before deciding to
3083 report the event or continue waiting. */
3084
3085 /* First check if we can explain the SIGTRAP with an internal
3086 breakpoint, or if we should possibly report the event to GDB.
3087 Do this before anything that may remove or insert a
3088 breakpoint. */
3089 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3090
3091 /* We have a SIGTRAP, possibly a step-over dance has just
3092 finished. If so, tweak the state machine accordingly,
3093 reinsert breakpoints and delete any single-step
3094 breakpoints. */
3095 step_over_finished = finish_step_over (event_child);
3096
3097 /* Now invoke the callbacks of any internal breakpoints there. */
3098 check_breakpoints (event_child->stop_pc);
3099
3100 /* Handle tracepoint data collecting. This may overflow the
3101 trace buffer, and cause a tracing stop, removing
3102 breakpoints. */
3103 trace_event = handle_tracepoints (event_child);
3104
3105 if (bp_explains_trap)
3106 threads_debug_printf ("Hit a gdbserver breakpoint.");
3107 }
3108 else
3109 {
3110 /* We have some other signal, possibly a step-over dance was in
3111 progress, and it should be cancelled too. */
3112 step_over_finished = finish_step_over (event_child);
3113 }
3114
3115 /* We have all the data we need. Either report the event to GDB, or
3116 resume threads and keep waiting for more. */
3117
3118 /* If we're collecting a fast tracepoint, finish the collection and
3119 move out of the jump pad before delivering a signal. See
3120 linux_stabilize_threads. */
3121
3122 if (WIFSTOPPED (w)
3123 && WSTOPSIG (w) != SIGTRAP
3124 && supports_fast_tracepoints ()
3125 && agent_loaded_p ())
3126 {
3127 threads_debug_printf ("Got signal %d for LWP %ld. Check if we need "
3128 "to defer or adjust it.",
3129 WSTOPSIG (w), lwpid_of (current_thread));
3130
3131 /* Allow debugging the jump pad itself. */
3132 if (current_thread->last_resume_kind != resume_step
3133 && maybe_move_out_of_jump_pad (event_child, &w))
3134 {
3135 enqueue_one_deferred_signal (event_child, &w);
3136
3137 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3138 WSTOPSIG (w), lwpid_of (current_thread));
3139
3140 resume_one_lwp (event_child, 0, 0, NULL);
3141
3142 return ignore_event (ourstatus);
3143 }
3144 }
3145
3146 if (event_child->collecting_fast_tracepoint
3147 != fast_tpoint_collect_result::not_collecting)
3148 {
3149 threads_debug_printf
3150 ("LWP %ld was trying to move out of the jump pad (%d). "
3151 "Check if we're already there.",
3152 lwpid_of (current_thread),
3153 (int) event_child->collecting_fast_tracepoint);
3154
3155 trace_event = 1;
3156
3157 event_child->collecting_fast_tracepoint
3158 = linux_fast_tracepoint_collecting (event_child, NULL);
3159
3160 if (event_child->collecting_fast_tracepoint
3161 != fast_tpoint_collect_result::before_insn)
3162 {
3163 /* No longer need this breakpoint. */
3164 if (event_child->exit_jump_pad_bkpt != NULL)
3165 {
3166 threads_debug_printf
3167 ("No longer need exit-jump-pad bkpt; removing it."
3168 "stopping all threads momentarily.");
3169
3170 /* Other running threads could hit this breakpoint.
3171 We don't handle moribund locations like GDB does,
3172 instead we always pause all threads when removing
3173 breakpoints, so that any step-over or
3174 decr_pc_after_break adjustment is always taken
3175 care of while the breakpoint is still
3176 inserted. */
3177 stop_all_lwps (1, event_child);
3178
3179 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3180 event_child->exit_jump_pad_bkpt = NULL;
3181
3182 unstop_all_lwps (1, event_child);
3183
3184 gdb_assert (event_child->suspended >= 0);
3185 }
3186 }
3187
3188 if (event_child->collecting_fast_tracepoint
3189 == fast_tpoint_collect_result::not_collecting)
3190 {
3191 threads_debug_printf
3192 ("fast tracepoint finished collecting successfully.");
3193
3194 /* We may have a deferred signal to report. */
3195 if (dequeue_one_deferred_signal (event_child, &w))
3196 threads_debug_printf ("dequeued one signal.");
3197 else
3198 {
3199 threads_debug_printf ("no deferred signals.");
3200
3201 if (stabilizing_threads)
3202 {
3203 ourstatus->set_stopped (GDB_SIGNAL_0);
3204
3205 threads_debug_printf
3206 ("ret = %s, stopped while stabilizing threads",
3207 target_pid_to_str (ptid_of (current_thread)).c_str ());
3208
3209 return ptid_of (current_thread);
3210 }
3211 }
3212 }
3213 }
3214
3215 /* Check whether GDB would be interested in this event. */
3216
3217 /* Check if GDB is interested in this syscall. */
3218 if (WIFSTOPPED (w)
3219 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3220 && !gdb_catch_this_syscall (event_child))
3221 {
3222 threads_debug_printf ("Ignored syscall for LWP %ld.",
3223 lwpid_of (current_thread));
3224
3225 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3226
3227 return ignore_event (ourstatus);
3228 }
3229
3230 /* If GDB is not interested in this signal, don't stop other
3231 threads, and don't report it to GDB. Just resume the inferior
3232 right away. We do this for threading-related signals as well as
3233 any that GDB specifically requested we ignore. But never ignore
3234 SIGSTOP if we sent it ourselves, and do not ignore signals when
3235 stepping - they may require special handling to skip the signal
3236 handler. Also never ignore signals that could be caused by a
3237 breakpoint. */
3238 if (WIFSTOPPED (w)
3239 && current_thread->last_resume_kind != resume_step
3240 && (
3241 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3242 (current_process ()->priv->thread_db != NULL
3243 && (WSTOPSIG (w) == __SIGRTMIN
3244 || WSTOPSIG (w) == __SIGRTMIN + 1))
3245 ||
3246 #endif
3247 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3248 && !(WSTOPSIG (w) == SIGSTOP
3249 && current_thread->last_resume_kind == resume_stop)
3250 && !linux_wstatus_maybe_breakpoint (w))))
3251 {
3252 siginfo_t info, *info_p;
3253
3254 threads_debug_printf ("Ignored signal %d for LWP %ld.",
3255 WSTOPSIG (w), lwpid_of (current_thread));
3256
3257 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3258 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3259 info_p = &info;
3260 else
3261 info_p = NULL;
3262
3263 if (step_over_finished)
3264 {
3265 /* We cancelled this thread's step-over above. We still
3266 need to unsuspend all other LWPs, and set them back
3267 running again while the signal handler runs. */
3268 unsuspend_all_lwps (event_child);
3269
3270 /* Enqueue the pending signal info so that proceed_all_lwps
3271 doesn't lose it. */
3272 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3273
3274 proceed_all_lwps ();
3275 }
3276 else
3277 {
3278 resume_one_lwp (event_child, event_child->stepping,
3279 WSTOPSIG (w), info_p);
3280 }
3281
3282 return ignore_event (ourstatus);
3283 }
3284
3285 /* Note that all addresses are always "out of the step range" when
3286 there's no range to begin with. */
3287 in_step_range = lwp_in_step_range (event_child);
3288
3289 /* If GDB wanted this thread to single step, and the thread is out
3290 of the step range, we always want to report the SIGTRAP, and let
3291 GDB handle it. Watchpoints should always be reported. So should
3292 signals we can't explain. A SIGTRAP we can't explain could be a
3293 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3294 do, we're be able to handle GDB breakpoints on top of internal
3295 breakpoints, by handling the internal breakpoint and still
3296 reporting the event to GDB. If we don't, we're out of luck, GDB
3297 won't see the breakpoint hit. If we see a single-step event but
3298 the thread should be continuing, don't pass the trap to gdb.
3299 That indicates that we had previously finished a single-step but
3300 left the single-step pending -- see
3301 complete_ongoing_step_over. */
3302 report_to_gdb = (!maybe_internal_trap
3303 || (current_thread->last_resume_kind == resume_step
3304 && !in_step_range)
3305 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3306 || (!in_step_range
3307 && !bp_explains_trap
3308 && !trace_event
3309 && !step_over_finished
3310 && !(current_thread->last_resume_kind == resume_continue
3311 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3312 || (gdb_breakpoint_here (event_child->stop_pc)
3313 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3314 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3315 || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);
3316
3317 run_breakpoint_commands (event_child->stop_pc);
3318
3319 /* We found no reason GDB would want us to stop. We either hit one
3320 of our own breakpoints, or finished an internal step GDB
3321 shouldn't know about. */
3322 if (!report_to_gdb)
3323 {
3324 if (bp_explains_trap)
3325 threads_debug_printf ("Hit a gdbserver breakpoint.");
3326
3327 if (step_over_finished)
3328 threads_debug_printf ("Step-over finished.");
3329
3330 if (trace_event)
3331 threads_debug_printf ("Tracepoint event.");
3332
3333 if (lwp_in_step_range (event_child))
3334 threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
3335 paddress (event_child->stop_pc),
3336 paddress (event_child->step_range_start),
3337 paddress (event_child->step_range_end));
3338
3339 /* We're not reporting this breakpoint to GDB, so apply the
3340 decr_pc_after_break adjustment to the inferior's regcache
3341 ourselves. */
3342
3343 if (low_supports_breakpoints ())
3344 {
3345 struct regcache *regcache
3346 = get_thread_regcache (current_thread, 1);
3347 low_set_pc (regcache, event_child->stop_pc);
3348 }
3349
3350 if (step_over_finished)
3351 {
3352 /* If we have finished stepping over a breakpoint, we've
3353 stopped and suspended all LWPs momentarily except the
3354 stepping one. This is where we resume them all again.
3355 We're going to keep waiting, so use proceed, which
3356 handles stepping over the next breakpoint. */
3357 unsuspend_all_lwps (event_child);
3358 }
3359 else
3360 {
3361 /* Remove the single-step breakpoints if any. Note that
3362 there isn't single-step breakpoint if we finished stepping
3363 over. */
3364 if (supports_software_single_step ()
3365 && has_single_step_breakpoints (current_thread))
3366 {
3367 stop_all_lwps (0, event_child);
3368 delete_single_step_breakpoints (current_thread);
3369 unstop_all_lwps (0, event_child);
3370 }
3371 }
3372
3373 threads_debug_printf ("proceeding all threads.");
3374
3375 proceed_all_lwps ();
3376
3377 return ignore_event (ourstatus);
3378 }
3379
3380 if (debug_threads)
3381 {
3382 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3383 threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
3384 lwpid_of (get_lwp_thread (event_child)),
3385 event_child->waitstatus.to_string ().c_str ());
3386
3387 if (current_thread->last_resume_kind == resume_step)
3388 {
3389 if (event_child->step_range_start == event_child->step_range_end)
3390 threads_debug_printf
3391 ("GDB wanted to single-step, reporting event.");
3392 else if (!lwp_in_step_range (event_child))
3393 threads_debug_printf ("Out of step range, reporting event.");
3394 }
3395
3396 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3397 threads_debug_printf ("Stopped by watchpoint.");
3398 else if (gdb_breakpoint_here (event_child->stop_pc))
3399 threads_debug_printf ("Stopped by GDB breakpoint.");
3400 }
3401
3402 threads_debug_printf ("Hit a non-gdbserver trap event.");
3403
3404 /* Alright, we're going to report a stop. */
3405
3406 /* Remove single-step breakpoints. */
3407 if (supports_software_single_step ())
3408 {
3409 /* Remove single-step breakpoints or not. It it is true, stop all
3410 lwps, so that other threads won't hit the breakpoint in the
3411 staled memory. */
3412 int remove_single_step_breakpoints_p = 0;
3413
3414 if (non_stop)
3415 {
3416 remove_single_step_breakpoints_p
3417 = has_single_step_breakpoints (current_thread);
3418 }
3419 else
3420 {
3421 /* In all-stop, a stop reply cancels all previous resume
3422 requests. Delete all single-step breakpoints. */
3423
3424 find_thread ([&] (thread_info *thread) {
3425 if (has_single_step_breakpoints (thread))
3426 {
3427 remove_single_step_breakpoints_p = 1;
3428 return true;
3429 }
3430
3431 return false;
3432 });
3433 }
3434
3435 if (remove_single_step_breakpoints_p)
3436 {
3437 /* If we remove single-step breakpoints from memory, stop all lwps,
3438 so that other threads won't hit the breakpoint in the staled
3439 memory. */
3440 stop_all_lwps (0, event_child);
3441
3442 if (non_stop)
3443 {
3444 gdb_assert (has_single_step_breakpoints (current_thread));
3445 delete_single_step_breakpoints (current_thread);
3446 }
3447 else
3448 {
3449 for_each_thread ([] (thread_info *thread){
3450 if (has_single_step_breakpoints (thread))
3451 delete_single_step_breakpoints (thread);
3452 });
3453 }
3454
3455 unstop_all_lwps (0, event_child);
3456 }
3457 }
3458
3459 if (!stabilizing_threads)
3460 {
3461 /* In all-stop, stop all threads. */
3462 if (!non_stop)
3463 stop_all_lwps (0, NULL);
3464
3465 if (step_over_finished)
3466 {
3467 if (!non_stop)
3468 {
3469 /* If we were doing a step-over, all other threads but
3470 the stepping one had been paused in start_step_over,
3471 with their suspend counts incremented. We don't want
3472 to do a full unstop/unpause, because we're in
3473 all-stop mode (so we want threads stopped), but we
3474 still need to unsuspend the other threads, to
3475 decrement their `suspended' count back. */
3476 unsuspend_all_lwps (event_child);
3477 }
3478 else
3479 {
3480 /* If we just finished a step-over, then all threads had
3481 been momentarily paused. In all-stop, that's fine,
3482 we want threads stopped by now anyway. In non-stop,
3483 we need to re-resume threads that GDB wanted to be
3484 running. */
3485 unstop_all_lwps (1, event_child);
3486 }
3487 }
3488
3489 /* If we're not waiting for a specific LWP, choose an event LWP
3490 from among those that have had events. Giving equal priority
3491 to all LWPs that have had events helps prevent
3492 starvation. */
3493 if (ptid == minus_one_ptid)
3494 {
3495 event_child->status_pending_p = 1;
3496 event_child->status_pending = w;
3497
3498 select_event_lwp (&event_child);
3499
3500 /* current_thread and event_child must stay in sync. */
3501 switch_to_thread (get_lwp_thread (event_child));
3502
3503 event_child->status_pending_p = 0;
3504 w = event_child->status_pending;
3505 }
3506
3507
3508 /* Stabilize threads (move out of jump pads). */
3509 if (!non_stop)
3510 target_stabilize_threads ();
3511 }
3512 else
3513 {
3514 /* If we just finished a step-over, then all threads had been
3515 momentarily paused. In all-stop, that's fine, we want
3516 threads stopped by now anyway. In non-stop, we need to
3517 re-resume threads that GDB wanted to be running. */
3518 if (step_over_finished)
3519 unstop_all_lwps (1, event_child);
3520 }
3521
3522 /* At this point, we haven't set OURSTATUS. This is where we do it. */
3523 gdb_assert (ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3524
3525 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3526 {
3527 /* If the reported event is an exit, fork, vfork or exec, let
3528 GDB know. */
3529
3530 /* Break the unreported fork relationship chain. */
3531 if (event_child->waitstatus.kind () == TARGET_WAITKIND_FORKED
3532 || event_child->waitstatus.kind () == TARGET_WAITKIND_VFORKED)
3533 {
3534 event_child->fork_relative->fork_relative = NULL;
3535 event_child->fork_relative = NULL;
3536 }
3537
3538 *ourstatus = event_child->waitstatus;
3539 /* Clear the event lwp's waitstatus since we handled it already. */
3540 event_child->waitstatus.set_ignore ();
3541 }
3542 else
3543 {
3544 /* The LWP stopped due to a plain signal or a syscall signal. Either way,
3545 event_chid->waitstatus wasn't filled in with the details, so look at
3546 the wait status W. */
3547 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3548 {
3549 int syscall_number;
3550
3551 get_syscall_trapinfo (event_child, &syscall_number);
3552 if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
3553 ourstatus->set_syscall_entry (syscall_number);
3554 else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
3555 ourstatus->set_syscall_return (syscall_number);
3556 else
3557 gdb_assert_not_reached ("unexpected syscall state");
3558 }
3559 else if (current_thread->last_resume_kind == resume_stop
3560 && WSTOPSIG (w) == SIGSTOP)
3561 {
3562 /* A thread that has been requested to stop by GDB with vCont;t,
3563 and it stopped cleanly, so report as SIG0. The use of
3564 SIGSTOP is an implementation detail. */
3565 ourstatus->set_stopped (GDB_SIGNAL_0);
3566 }
3567 else
3568 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3569 }
3570
3571 /* Now that we've selected our final event LWP, un-adjust its PC if
3572 it was a software breakpoint, and the client doesn't know we can
3573 adjust the breakpoint ourselves. */
3574 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3575 && !cs.swbreak_feature)
3576 {
3577 int decr_pc = low_decr_pc_after_break ();
3578
3579 if (decr_pc != 0)
3580 {
3581 struct regcache *regcache
3582 = get_thread_regcache (current_thread, 1);
3583 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3584 }
3585 }
3586
3587 gdb_assert (step_over_bkpt == null_ptid);
3588
3589 threads_debug_printf ("ret = %s, %s",
3590 target_pid_to_str (ptid_of (current_thread)).c_str (),
3591 ourstatus->to_string ().c_str ());
3592
3593 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
3594 return filter_exit_event (event_child, ourstatus);
3595
3596 return ptid_of (current_thread);
3597 }
3598
3599 /* Get rid of any pending event in the pipe. */
3600 static void
3601 async_file_flush (void)
3602 {
3603 linux_event_pipe.flush ();
3604 }
3605
3606 /* Put something in the pipe, so the event loop wakes up. */
3607 static void
3608 async_file_mark (void)
3609 {
3610 linux_event_pipe.mark ();
3611 }
3612
3613 ptid_t
3614 linux_process_target::wait (ptid_t ptid,
3615 target_waitstatus *ourstatus,
3616 target_wait_flags target_options)
3617 {
3618 ptid_t event_ptid;
3619
3620 /* Flush the async file first. */
3621 if (target_is_async_p ())
3622 async_file_flush ();
3623
3624 do
3625 {
3626 event_ptid = wait_1 (ptid, ourstatus, target_options);
3627 }
3628 while ((target_options & TARGET_WNOHANG) == 0
3629 && event_ptid == null_ptid
3630 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3631
3632 /* If at least one stop was reported, there may be more. A single
3633 SIGCHLD can signal more than one child stop. */
3634 if (target_is_async_p ()
3635 && (target_options & TARGET_WNOHANG) != 0
3636 && event_ptid != null_ptid)
3637 async_file_mark ();
3638
3639 return event_ptid;
3640 }
3641
3642 /* Send a signal to an LWP. */
3643
3644 static int
3645 kill_lwp (unsigned long lwpid, int signo)
3646 {
3647 int ret;
3648
3649 errno = 0;
3650 ret = syscall (__NR_tkill, lwpid, signo);
3651 if (errno == ENOSYS)
3652 {
3653 /* If tkill fails, then we are not using nptl threads, a
3654 configuration we no longer support. */
3655 perror_with_name (("tkill"));
3656 }
3657 return ret;
3658 }
3659
3660 void
3661 linux_stop_lwp (struct lwp_info *lwp)
3662 {
3663 send_sigstop (lwp);
3664 }
3665
3666 static void
3667 send_sigstop (struct lwp_info *lwp)
3668 {
3669 int pid;
3670
3671 pid = lwpid_of (get_lwp_thread (lwp));
3672
3673 /* If we already have a pending stop signal for this process, don't
3674 send another. */
3675 if (lwp->stop_expected)
3676 {
3677 threads_debug_printf ("Have pending sigstop for lwp %d", pid);
3678
3679 return;
3680 }
3681
3682 threads_debug_printf ("Sending sigstop to lwp %d", pid);
3683
3684 lwp->stop_expected = 1;
3685 kill_lwp (pid, SIGSTOP);
3686 }
3687
3688 static void
3689 send_sigstop (thread_info *thread, lwp_info *except)
3690 {
3691 struct lwp_info *lwp = get_thread_lwp (thread);
3692
3693 /* Ignore EXCEPT. */
3694 if (lwp == except)
3695 return;
3696
3697 if (lwp->stopped)
3698 return;
3699
3700 send_sigstop (lwp);
3701 }
3702
3703 /* Increment the suspend count of an LWP, and stop it, if not stopped
3704 yet. */
3705 static void
3706 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3707 {
3708 struct lwp_info *lwp = get_thread_lwp (thread);
3709
3710 /* Ignore EXCEPT. */
3711 if (lwp == except)
3712 return;
3713
3714 lwp_suspended_inc (lwp);
3715
3716 send_sigstop (thread, except);
3717 }
3718
3719 static void
3720 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3721 {
3722 /* Store the exit status for later. */
3723 lwp->status_pending_p = 1;
3724 lwp->status_pending = wstat;
3725
3726 /* Store in waitstatus as well, as there's nothing else to process
3727 for this event. */
3728 if (WIFEXITED (wstat))
3729 lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
3730 else if (WIFSIGNALED (wstat))
3731 lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
3732
3733 /* Prevent trying to stop it. */
3734 lwp->stopped = 1;
3735
3736 /* No further stops are expected from a dead lwp. */
3737 lwp->stop_expected = 0;
3738 }
3739
3740 /* Return true if LWP has exited already, and has a pending exit event
3741 to report to GDB. */
3742
3743 static int
3744 lwp_is_marked_dead (struct lwp_info *lwp)
3745 {
3746 return (lwp->status_pending_p
3747 && (WIFEXITED (lwp->status_pending)
3748 || WIFSIGNALED (lwp->status_pending)));
3749 }
3750
3751 void
3752 linux_process_target::wait_for_sigstop ()
3753 {
3754 struct thread_info *saved_thread;
3755 ptid_t saved_tid;
3756 int wstat;
3757 int ret;
3758
3759 saved_thread = current_thread;
3760 if (saved_thread != NULL)
3761 saved_tid = saved_thread->id;
3762 else
3763 saved_tid = null_ptid; /* avoid bogus unused warning */
3764
3765 scoped_restore_current_thread restore_thread;
3766
3767 threads_debug_printf ("pulling events");
3768
3769 /* Passing NULL_PTID as filter indicates we want all events to be
3770 left pending. Eventually this returns when there are no
3771 unwaited-for children left. */
3772 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3773 gdb_assert (ret == -1);
3774
3775 if (saved_thread == NULL || mythread_alive (saved_tid))
3776 return;
3777 else
3778 {
3779 threads_debug_printf ("Previously current thread died.");
3780
3781 /* We can't change the current inferior behind GDB's back,
3782 otherwise, a subsequent command may apply to the wrong
3783 process. */
3784 restore_thread.dont_restore ();
3785 switch_to_thread (nullptr);
3786 }
3787 }
3788
3789 bool
3790 linux_process_target::stuck_in_jump_pad (thread_info *thread)
3791 {
3792 struct lwp_info *lwp = get_thread_lwp (thread);
3793
3794 if (lwp->suspended != 0)
3795 {
3796 internal_error (__FILE__, __LINE__,
3797 "LWP %ld is suspended, suspended=%d\n",
3798 lwpid_of (thread), lwp->suspended);
3799 }
3800 gdb_assert (lwp->stopped);
3801
3802 /* Allow debugging the jump pad, gdb_collect, etc.. */
3803 return (supports_fast_tracepoints ()
3804 && agent_loaded_p ()
3805 && (gdb_breakpoint_here (lwp->stop_pc)
3806 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3807 || thread->last_resume_kind == resume_step)
3808 && (linux_fast_tracepoint_collecting (lwp, NULL)
3809 != fast_tpoint_collect_result::not_collecting));
3810 }
3811
3812 void
3813 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3814 {
3815 struct lwp_info *lwp = get_thread_lwp (thread);
3816 int *wstat;
3817
3818 if (lwp->suspended != 0)
3819 {
3820 internal_error (__FILE__, __LINE__,
3821 "LWP %ld is suspended, suspended=%d\n",
3822 lwpid_of (thread), lwp->suspended);
3823 }
3824 gdb_assert (lwp->stopped);
3825
3826 /* For gdb_breakpoint_here. */
3827 scoped_restore_current_thread restore_thread;
3828 switch_to_thread (thread);
3829
3830 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3831
3832 /* Allow debugging the jump pad, gdb_collect, etc. */
3833 if (!gdb_breakpoint_here (lwp->stop_pc)
3834 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3835 && thread->last_resume_kind != resume_step
3836 && maybe_move_out_of_jump_pad (lwp, wstat))
3837 {
3838 threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
3839 lwpid_of (thread));
3840
3841 if (wstat)
3842 {
3843 lwp->status_pending_p = 0;
3844 enqueue_one_deferred_signal (lwp, wstat);
3845
3846 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad",
3847 WSTOPSIG (*wstat), lwpid_of (thread));
3848 }
3849
3850 resume_one_lwp (lwp, 0, 0, NULL);
3851 }
3852 else
3853 lwp_suspended_inc (lwp);
3854 }
3855
3856 static bool
3857 lwp_running (thread_info *thread)
3858 {
3859 struct lwp_info *lwp = get_thread_lwp (thread);
3860
3861 if (lwp_is_marked_dead (lwp))
3862 return false;
3863
3864 return !lwp->stopped;
3865 }
3866
3867 void
3868 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
3869 {
3870 /* Should not be called recursively. */
3871 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3872
3873 THREADS_SCOPED_DEBUG_ENTER_EXIT;
3874
3875 threads_debug_printf
3876 ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
3877 (except != NULL
3878 ? target_pid_to_str (ptid_of (get_lwp_thread (except))).c_str ()
3879 : "none"));
3880
3881 stopping_threads = (suspend
3882 ? STOPPING_AND_SUSPENDING_THREADS
3883 : STOPPING_THREADS);
3884
3885 if (suspend)
3886 for_each_thread ([&] (thread_info *thread)
3887 {
3888 suspend_and_send_sigstop (thread, except);
3889 });
3890 else
3891 for_each_thread ([&] (thread_info *thread)
3892 {
3893 send_sigstop (thread, except);
3894 });
3895
3896 wait_for_sigstop ();
3897 stopping_threads = NOT_STOPPING_THREADS;
3898
3899 threads_debug_printf ("setting stopping_threads back to !stopping");
3900 }
3901
3902 /* Enqueue one signal in the chain of signals which need to be
3903 delivered to this process on next resume. */
3904
3905 static void
3906 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3907 {
3908 lwp->pending_signals.emplace_back (signal);
3909 if (info == nullptr)
3910 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
3911 else
3912 lwp->pending_signals.back ().info = *info;
3913 }
3914
3915 void
3916 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
3917 {
3918 struct thread_info *thread = get_lwp_thread (lwp);
3919 struct regcache *regcache = get_thread_regcache (thread, 1);
3920
3921 scoped_restore_current_thread restore_thread;
3922
3923 switch_to_thread (thread);
3924 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
3925
3926 for (CORE_ADDR pc : next_pcs)
3927 set_single_step_breakpoint (pc, current_ptid);
3928 }
3929
3930 int
3931 linux_process_target::single_step (lwp_info* lwp)
3932 {
3933 int step = 0;
3934
3935 if (supports_hardware_single_step ())
3936 {
3937 step = 1;
3938 }
3939 else if (supports_software_single_step ())
3940 {
3941 install_software_single_step_breakpoints (lwp);
3942 step = 0;
3943 }
3944 else
3945 threads_debug_printf ("stepping is not implemented on this target");
3946
3947 return step;
3948 }
3949
3950 /* The signal can be delivered to the inferior if we are not trying to
3951 finish a fast tracepoint collect. Since signal can be delivered in
3952 the step-over, the program may go to signal handler and trap again
3953 after return from the signal handler. We can live with the spurious
3954 double traps. */
3955
3956 static int
3957 lwp_signal_can_be_delivered (struct lwp_info *lwp)
3958 {
3959 return (lwp->collecting_fast_tracepoint
3960 == fast_tpoint_collect_result::not_collecting);
3961 }
3962
/* Resume LWP: single-step it if STEP is non-zero, and deliver SIGNAL
   (with optional siginfo INFO) if it is non-zero and deliverable
   right now; otherwise the signal is queued for a later resume.
   Errors from ptrace are raised via perror_with_name -- hence the
   "throw" in the name.  */

void
linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
					    int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int ptrace_request;
  struct process_info *proc = get_thread_process (thread);

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  /* Resuming an LWP that is already running is a no-op.  */
  if (lwp->stopped == 0)
    return;

  /* Any extended event (fork/exec/etc.) must have been reported
     before the LWP is resumed again.  */
  gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  /* While stabilizing threads, the only LWPs resumed are those moving
     out of a fast tracepoint jump pad.  */
  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || !lwp->pending_signals.empty ()
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Not resuming lwp %ld (%s, stop %s); has pending status",
	 lwpid_of (thread), step ? "step" : "continue",
	 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      threads_debug_printf (" pending reinsert at 0x%s",
			    paddress (lwp->bp_reinsert));

      if (supports_hardware_single_step ())
	{
	  /* Sanity-check the step-over invariants, except while
	     moving out of a jump pad, where stepping without the
	     "step" flag is expected.  */
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    threads_debug_printf
      ("lwp %ld wants to get out of fast tracepoint jump pad "
       "(exit-jump-pad-bkpt)", lwpid_of (thread));

  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      threads_debug_printf
	("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
	 lwpid_of (thread));

      if (supports_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error (__FILE__, __LINE__,
			  "moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      threads_debug_printf
	("lwp %ld has a while-stepping action -> forcing step.",
	 lwpid_of (thread));

      step = single_step (lwp);
    }

  /* Record the PC we are resuming from; guarded because register
     access requires an initialised target description (see the note
     at the top of this function).  */
  if (proc->tdesc != NULL && low_supports_breakpoints ())
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      lwp->stop_pc = low_get_pc (regcache);

      threads_debug_printf (" %s from pc 0x%lx", step ? "step" : "continue",
			    (long) lwp->stop_pc);
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
    {
      const pending_signal &p_sig = lwp->pending_signals.front ();

      signal = p_sig.signal;
      /* Only restore the siginfo if one was recorded with the signal;
	 a zeroed si_signo means none was.  */
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals.pop_front ();
    }

  threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
			lwpid_of (thread), step ? "step" : "continue", signal,
			lwp->stop_expected ? "expected" : "not expected");

  /* Give the low target a chance to do per-arch bookkeeping (e.g.
     flush debug-register state) before the LWP runs.  */
  low_prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  /* Choose the resume request: single-step, stop at syscalls if GDB
     has syscall catchpoints, or plain continue.  */
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  if (errno)
    {
      int saved_errno = errno;

      threads_debug_printf ("ptrace errno = %d (%s)",
			    saved_errno, strerror (saved_errno));

      errno = saved_errno;
      perror_with_name ("resuming thread");
    }

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
4159
/* Default implementation of the low_prepare_to_resume hook, called
   just before an LWP is resumed.  Architecture-specific low targets
   override this to flush per-LWP state (e.g. debug registers) to the
   inferior; the base implementation has nothing to do.  */

void
linux_process_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* Nop.  */
}
4165
4166 /* Called when we try to resume a stopped LWP and that errors out. If
4167 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4168 or about to become), discard the error, clear any pending status
4169 the LWP may have, and return true (we'll collect the exit status
4170 soon enough). Otherwise, return false. */
4171
4172 static int
4173 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4174 {
4175 struct thread_info *thread = get_lwp_thread (lp);
4176
4177 /* If we get an error after resuming the LWP successfully, we'd
4178 confuse !T state for the LWP being gone. */
4179 gdb_assert (lp->stopped);
4180
4181 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4182 because even if ptrace failed with ESRCH, the tracee may be "not
4183 yet fully dead", but already refusing ptrace requests. In that
4184 case the tracee has 'R (Running)' state for a little bit
4185 (observed in Linux 3.18). See also the note on ESRCH in the
4186 ptrace(2) man page. Instead, check whether the LWP has any state
4187 other than ptrace-stopped. */
4188
4189 /* Don't assume anything if /proc/PID/status can't be read. */
4190 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4191 {
4192 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4193 lp->status_pending_p = 0;
4194 return 1;
4195 }
4196 return 0;
4197 }
4198
4199 void
4200 linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4201 siginfo_t *info)
4202 {
4203 try
4204 {
4205 resume_one_lwp_throw (lwp, step, signal, info);
4206 }
4207 catch (const gdb_exception_error &ex)
4208 {
4209 if (check_ptrace_stopped_lwp_gone (lwp))
4210 {
4211 /* This could because we tried to resume an LWP after its leader
4212 exited. Mark it as resumed, so we can collect an exit event
4213 from it. */
4214 lwp->stopped = 0;
4215 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4216 }
4217 else
4218 throw;
4219 }
4220 }
4221
/* This function is called once per thread via for_each_thread.
   We look up which resume request applies to THREAD and mark it with a
   pointer to the appropriate resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */

static void
linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  for (int ndx = 0; ndx < n; ndx++)
    {
      ptid_t ptid = resume[ndx].thread;
      if (ptid == minus_one_ptid
	  || ptid == thread->id
	  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
	     of PID'.  */
	  || (ptid.pid () == pid_of (thread)
	      && (ptid.is_pid ()
		  || ptid.lwp () == -1)))
	{
	  /* A stop request for a thread GDB already asked to stop is
	     a no-op; keep scanning the remaining resume elements.  */
	  if (resume[ndx].kind == resume_stop
	      && thread->last_resume_kind == resume_stop)
	    {
	      threads_debug_printf
		("already %s LWP %ld at GDB's request",
		 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
		  ? "stopped" : "stopping"),
		 lwpid_of (thread));

	      continue;
	    }

	  /* Ignore (wildcard) resume requests for already-resumed
	     threads.  */
	  if (resume[ndx].kind != resume_stop
	      && thread->last_resume_kind != resume_stop)
	    {
	      threads_debug_printf
		("already %s LWP %ld at GDB's request",
		 (thread->last_resume_kind == resume_step
		  ? "stepping" : "continuing"),
		 lwpid_of (thread));
	      continue;
	    }

	  /* Don't let wildcard resumes resume fork children that GDB
	     does not yet know are new fork children.  */
	  if (lwp->fork_relative != NULL)
	    {
	      struct lwp_info *rel = lwp->fork_relative;

	      if (rel->status_pending_p
		  && (rel->waitstatus.kind () == TARGET_WAITKIND_FORKED
		      || rel->waitstatus.kind () == TARGET_WAITKIND_VFORKED))
		{
		  threads_debug_printf
		    ("not resuming LWP %ld: has queued stop reply",
		     lwpid_of (thread));
		  continue;
		}
	    }

	  /* If the thread has a pending event that has already been
	     reported to GDBserver core, but GDB has not pulled the
	     event out of the vStopped queue yet, likewise, ignore the
	     (wildcard) resume request.  */
	  if (in_queued_stop_replies (thread->id))
	    {
	      threads_debug_printf
		("not resuming LWP %ld: has queued stop reply",
		 lwpid_of (thread));
	      continue;
	    }

	  /* This resume element applies: record it on the LWP and
	     latch the request kind and step range.  */
	  lwp->resume = &resume[ndx];
	  thread->last_resume_kind = lwp->resume->kind;

	  lwp->step_range_start = lwp->resume->step_range_start;
	  lwp->step_range_end = lwp->resume->step_range_end;

	  /* If we had a deferred signal to report, dequeue one now.
	     This can happen if LWP gets more than one signal while
	     trying to get out of a jump pad.  */
	  if (lwp->stopped
	      && !lwp->status_pending_p
	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
	    {
	      lwp->status_pending_p = 1;

	      threads_debug_printf
		("Dequeueing deferred signal %d for LWP %ld, "
		 "leaving status pending.",
		 WSTOPSIG (lwp->status_pending),
		 lwpid_of (thread));
	    }

	  return;
	}
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;
}
4329
4330 bool
4331 linux_process_target::resume_status_pending (thread_info *thread)
4332 {
4333 struct lwp_info *lwp = get_thread_lwp (thread);
4334
4335 /* LWPs which will not be resumed are not interesting, because
4336 we might not wait for them next time through linux_wait. */
4337 if (lwp->resume == NULL)
4338 return false;
4339
4340 return thread_still_has_status_pending (thread);
4341 }
4342
/* Return true if THREAD is stopped at a breakpoint that must be
   stepped over before the thread can make progress, false otherwise.
   Used via find_thread to pick the thread for the step-over dance.  */

bool
linux_process_target::thread_needs_step_over (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
			    lwpid_of (thread));
      return false;
    }

  /* Threads GDB wants held stopped are never stepped over.  */
  if (thread->last_resume_kind == resume_stop)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, should remain stopped",
	 lwpid_of (thread));
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
			    lwpid_of (thread));
      return false;
    }

  /* A pending status will be reported instead of resuming, so no
     step-over is needed now.  */
  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending status.",
	 lwpid_of (thread));
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Cancelling, PC was changed. "
	 "Old stop_pc was 0x%s, PC is now 0x%s", lwpid_of (thread),
	 paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (supports_software_single_step ()
      && !lwp->pending_signals.empty ()
      && lwp_signal_can_be_delivered (lwp))
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending signals.",
	 lwpid_of (thread));

      return false;
    }

  /* The breakpoint queries below operate on the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
				" GDB breakpoint at 0x%s; skipping step over",
				lwpid_of (thread), paddress (pc));

	  return false;
	}
      else
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, "
				"found breakpoint at 0x%s",
				lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  return true;
	}
    }

  threads_debug_printf
    ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
     lwpid_of (thread), paddress (pc));

  return false;
}
4458
/* Begin stepping LWP past the breakpoint at its current PC: stop all
   other LWPs, remove the breakpoint(s)/fast-tracepoint jumps at PC,
   single-step the LWP (in hardware, or via breakpoints set up by
   single_step), and record in step_over_bkpt that the next event is
   expected from this LWP.  finish_step_over undoes this once the step
   completes.  */

void
linux_process_target::start_step_over (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  CORE_ADDR pc;

  threads_debug_printf ("Starting step-over on LWP %ld. Stopping all threads",
			lwpid_of (thread));

  stop_all_lwps (1, lwp);

  /* LWP itself was excluded from the stop above, so it must not be
     carrying a suspend count here.  */
  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  threads_debug_printf ("Done stopping all threads for step-over.");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  bool step = false;
  {
    /* Breakpoint manipulation and single_step act on the current
       thread, so temporarily switch to LWP's thread.  */
    scoped_restore_current_thread restore_thread;
    switch_to_thread (thread);

    /* Remember where to reinsert the breakpoint afterwards (see
       finish_step_over).  */
    lwp->bp_reinsert = pc;
    uninsert_breakpoints_at (pc);
    uninsert_fast_tracepoint_jumps_at (pc);

    step = single_step (lwp);
  }

  resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->id;
}
4502
4503 bool
4504 linux_process_target::finish_step_over (lwp_info *lwp)
4505 {
4506 if (lwp->bp_reinsert != 0)
4507 {
4508 scoped_restore_current_thread restore_thread;
4509
4510 threads_debug_printf ("Finished step over.");
4511
4512 switch_to_thread (get_lwp_thread (lwp));
4513
4514 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4515 may be no breakpoint to reinsert there by now. */
4516 reinsert_breakpoints_at (lwp->bp_reinsert);
4517 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4518
4519 lwp->bp_reinsert = 0;
4520
4521 /* Delete any single-step breakpoints. No longer needed. We
4522 don't have to worry about other threads hitting this trap,
4523 and later not being able to explain it, because we were
4524 stepping over a breakpoint, and we hold all threads but
4525 LWP stopped while doing that. */
4526 if (!supports_hardware_single_step ())
4527 {
4528 gdb_assert (has_single_step_breakpoints (current_thread));
4529 delete_single_step_breakpoints (current_thread);
4530 }
4531
4532 step_over_bkpt = null_ptid;
4533 return true;
4534 }
4535 else
4536 return false;
4537 }
4538
/* If a step-over is in progress (step_over_bkpt is set), complete it
   before proceeding (used on the detach path): wait until no
   unwaited-for children are left, finish the step-over, and discard
   the resulting SIGTRAP unless GDB itself asked for a step.  */

void
linux_process_target::complete_ongoing_step_over ()
{
  if (step_over_bkpt != null_ptid)
    {
      struct lwp_info *lwp;
      int wstat;
      int ret;

      threads_debug_printf ("detach: step over in progress, finish it first");

      /* Passing NULL_PTID as filter indicates we want all events to
	 be left pending.  Eventually this returns when there are no
	 unwaited-for children left.  */
      ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
				     __WALL);
      gdb_assert (ret == -1);

      lwp = find_lwp_pid (step_over_bkpt);
      if (lwp != NULL)
	{
	  finish_step_over (lwp);

	  /* If we got our step SIGTRAP, don't leave it pending,
	     otherwise we would report it to GDB as a spurious
	     SIGTRAP.  */
	  gdb_assert (lwp->status_pending_p);
	  if (WIFSTOPPED (lwp->status_pending)
	      && WSTOPSIG (lwp->status_pending) == SIGTRAP)
	    {
	      thread_info *thread = get_lwp_thread (lwp);
	      if (thread->last_resume_kind != resume_step)
		{
		  threads_debug_printf ("detach: discard step-over SIGTRAP");

		  /* Drop the SIGTRAP and let the LWP run again (still
		     stepping if it was mid-step).  */
		  lwp->status_pending_p = 0;
		  lwp->status_pending = 0;
		  resume_one_lwp (lwp, lwp->stepping, 0, NULL);
		}
	      else
		threads_debug_printf
		  ("detach: resume_step, not discarding step-over SIGTRAP");
	    }
	}
      step_over_bkpt = null_ptid;
      unsuspend_all_lwps (lwp);
    }
}
4587
/* Act on the resume request previously recorded on THREAD's LWP by
   linux_set_resume_request: stop it, resume it, or leave it stopped
   with signals queued.  When LEAVE_ALL_STOPPED is true no thread is
   actually resumed (a step-over or pending status takes precedence),
   but signals are still queued.  */

void
linux_process_target::resume_one_thread (thread_info *thread,
					 bool leave_all_stopped)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int leave_pending;

  /* No resume element matched this thread; nothing to do.  */
  if (lwp->resume == NULL)
    return;

  if (lwp->resume->kind == resume_stop)
    {
      threads_debug_printf ("resume_stop request for LWP %ld",
			    lwpid_of (thread));

      if (!lwp->stopped)
	{
	  threads_debug_printf ("stopping LWP %ld", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  threads_debug_printf ("already stopped LWP %ld", lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report.empty ())
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.set_ignore ();
      return;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume it - we can just report the pending status.
     Likewise if it is suspended, because e.g., another thread is
     stepping past a breakpoint.  Make sure to queue any signals that
     would otherwise be sent.  In all-stop mode, we do this decision
     based on if *any* thread has a pending status.  If there's a
     thread that needs the step-over-breakpoint dance, then don't
     resume any other thread but that particular one.  */
  leave_pending = (lwp->suspended
		   || lwp->status_pending_p
		   || leave_all_stopped);

  /* If we have a new signal, enqueue the signal.  */
  if (lwp->resume->sig != 0)
    {
      siginfo_t info, *info_p;

      /* If this is the same signal we were previously stopped by,
	 make sure to queue its siginfo.  */
      if (WIFSTOPPED (lwp->last_status)
	  && WSTOPSIG (lwp->last_status) == lwp->resume->sig
	  && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
		     (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
    }

  if (!leave_pending)
    {
      threads_debug_printf ("resuming LWP %ld", lwpid_of (thread));

      proceed_one_lwp (thread, NULL);
    }
  else
    threads_debug_printf ("leaving LWP %ld stopped", lwpid_of (thread));

  /* The request has been consumed.  */
  thread->last_status.set_ignore ();
  lwp->resume = NULL;
}
4685
/* Implementation of the resume target op.  Apply the N resume
   requests in RESUME_INFO: record each thread's matching request,
   then either resume the threads, or (in all-stop, when some thread
   has a pending status or needs a breakpoint step-over) leave
   everything stopped, queueing signals that would otherwise have been
   delivered.  */

void
linux_process_target::resume (thread_resume *resume_info, size_t n)
{
  struct thread_info *need_step_over = NULL;

  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  /* Mark each thread with the resume element that applies to it.  */
  for_each_thread ([&] (thread_info *thread)
    {
      linux_set_resume_request (thread, resume_info, n);
    });

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  bool any_pending = false;
  if (!non_stop)
    any_pending = find_thread ([this] (thread_info *thread)
		    {
		      return resume_status_pending (thread);
		    }) != nullptr;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && low_supports_breakpoints ())
    need_step_over = find_thread ([this] (thread_info *thread)
		       {
			 return thread_needs_step_over (thread);
		       });

  bool leave_all_stopped = (need_step_over != NULL || any_pending);

  if (need_step_over != NULL)
    threads_debug_printf ("Not resuming all, need step over");
  else if (any_pending)
    threads_debug_printf ("Not resuming, all-stop and found "
			  "an LWP with pending status");
  else
    threads_debug_printf ("Resuming, no pending status or step over needed");

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  for_each_thread ([&] (thread_info *thread)
    {
      resume_one_thread (thread, leave_all_stopped);
    });

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  /* We may have events that were pending that can/should be sent to
     the client now.  Trigger a linux_wait call.  */
  if (target_is_async_p ())
    async_file_mark ();
}
4748
/* Resume THREAD's LWP, unless it is EXCEPT's LWP or must stay put:
   already running, held stopped at GDB's request, has a pending
   status, or is suspended.  Requeues a SIGSTOP or installs
   single-step state as needed before resuming.  Called for each
   thread when proceeding after e.g. a step-over.  */

void
linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  /* EXCEPT is the LWP the caller is handling itself.  */
  if (lwp == except)
    return;

  threads_debug_printf ("lwp %ld", lwpid_of (thread));

  if (!lwp->stopped)
    {
      threads_debug_printf ("   LWP %ld already running", lwpid_of (thread));
      return;
    }

  /* GDB asked this thread to stop and has already been told it
     stopped; keep it that way.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
    {
      threads_debug_printf ("   client wants LWP to remain %ld stopped",
			    lwpid_of (thread));
      return;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf ("   LWP %ld has pending status, leaving stopped",
			    lwpid_of (thread));
      return;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      threads_debug_printf ("   LWP %ld is suspended", lwpid_of (thread));
      return;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report.empty ()
      && (lwp->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting))
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      threads_debug_printf
	("Client wants LWP %ld to stop.  Making sure it has a SIGSTOP pending",
	 lwpid_of (thread));

      send_sigstop (lwp);
    }

  if (thread->last_resume_kind == resume_step)
    {
      threads_debug_printf ("   stepping LWP %ld, client wants it stepping",
			    lwpid_of (thread));

      /* If resume_step is requested by GDB, install single-step
	 breakpoints when the thread is about to be actually resumed if
	 the single-step breakpoints weren't removed.  */
      if (supports_software_single_step ()
	  && !has_single_step_breakpoints (thread))
	install_software_single_step_breakpoints (lwp);

      step = maybe_hw_step (thread);
    }
  else if (lwp->bp_reinsert != 0)
    {
      /* Mid step-over: keep single-stepping until past the
	 breakpoint being stepped over.  */
      threads_debug_printf ("   stepping LWP %ld, reinsert set",
			    lwpid_of (thread));

      step = maybe_hw_step (thread);
    }
  else
    step = 0;

  resume_one_lwp (lwp, step, 0, NULL);
}
4837
4838 void
4839 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4840 lwp_info *except)
4841 {
4842 struct lwp_info *lwp = get_thread_lwp (thread);
4843
4844 if (lwp == except)
4845 return;
4846
4847 lwp_suspended_decr (lwp);
4848
4849 proceed_one_lwp (thread, except);
4850 }
4851
4852 void
4853 linux_process_target::proceed_all_lwps ()
4854 {
4855 struct thread_info *need_step_over;
4856
4857 /* If there is a thread which would otherwise be resumed, which is
4858 stopped at a breakpoint that needs stepping over, then don't
4859 resume any threads - have it step over the breakpoint with all
4860 other threads stopped, then resume all threads again. */
4861
4862 if (low_supports_breakpoints ())
4863 {
4864 need_step_over = find_thread ([this] (thread_info *thread)
4865 {
4866 return thread_needs_step_over (thread);
4867 });
4868
4869 if (need_step_over != NULL)
4870 {
4871 threads_debug_printf ("found thread %ld needing a step-over",
4872 lwpid_of (need_step_over));
4873
4874 start_step_over (get_thread_lwp (need_step_over));
4875 return;
4876 }
4877 }
4878
4879 threads_debug_printf ("Proceeding, no step-over needed");
4880
4881 for_each_thread ([this] (thread_info *thread)
4882 {
4883 proceed_one_lwp (thread, NULL);
4884 });
4885 }
4886
4887 void
4888 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
4889 {
4890 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4891
4892 if (except)
4893 threads_debug_printf ("except=(LWP %ld)",
4894 lwpid_of (get_lwp_thread (except)));
4895 else
4896 threads_debug_printf ("except=nullptr");
4897
4898 if (unsuspend)
4899 for_each_thread ([&] (thread_info *thread)
4900 {
4901 unsuspend_and_proceed_one_lwp (thread, except);
4902 });
4903 else
4904 for_each_thread ([&] (thread_info *thread)
4905 {
4906 proceed_one_lwp (thread, except);
4907 });
4908 }
4909
4910
4911 #ifdef HAVE_LINUX_REGSETS
4912
4913 #define use_linux_regsets 1
4914
4915 /* Returns true if REGSET has been disabled. */
4916
4917 static int
4918 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4919 {
4920 return (info->disabled_regsets != NULL
4921 && info->disabled_regsets[regset - info->regsets]);
4922 }
4923
4924 /* Disable REGSET. */
4925
4926 static void
4927 disable_regset (struct regsets_info *info, struct regset_info *regset)
4928 {
4929 int dr_offset;
4930
4931 dr_offset = regset - info->regsets;
4932 if (info->disabled_regsets == NULL)
4933 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
4934 info->disabled_regsets[dr_offset] = 1;
4935 }
4936
4937 static int
4938 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4939 struct regcache *regcache)
4940 {
4941 struct regset_info *regset;
4942 int saw_general_regs = 0;
4943 int pid;
4944 struct iovec iov;
4945
4946 pid = lwpid_of (current_thread);
4947 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4948 {
4949 void *buf, *data;
4950 int nt_type, res;
4951
4952 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4953 continue;
4954
4955 buf = xmalloc (regset->size);
4956
4957 nt_type = regset->nt_type;
4958 if (nt_type)
4959 {
4960 iov.iov_base = buf;
4961 iov.iov_len = regset->size;
4962 data = (void *) &iov;
4963 }
4964 else
4965 data = buf;
4966
4967 #ifndef __sparc__
4968 res = ptrace (regset->get_request, pid,
4969 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4970 #else
4971 res = ptrace (regset->get_request, pid, data, nt_type);
4972 #endif
4973 if (res < 0)
4974 {
4975 if (errno == EIO
4976 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
4977 {
4978 /* If we get EIO on a regset, or an EINVAL and the regset is
4979 optional, do not try it again for this process mode. */
4980 disable_regset (regsets_info, regset);
4981 }
4982 else if (errno == ENODATA)
4983 {
4984 /* ENODATA may be returned if the regset is currently
4985 not "active". This can happen in normal operation,
4986 so suppress the warning in this case. */
4987 }
4988 else if (errno == ESRCH)
4989 {
4990 /* At this point, ESRCH should mean the process is
4991 already gone, in which case we simply ignore attempts
4992 to read its registers. */
4993 }
4994 else
4995 {
4996 char s[256];
4997 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4998 pid);
4999 perror (s);
5000 }
5001 }
5002 else
5003 {
5004 if (regset->type == GENERAL_REGS)
5005 saw_general_regs = 1;
5006 regset->store_function (regcache, buf);
5007 }
5008 free (buf);
5009 }
5010 if (saw_general_regs)
5011 return 0;
5012 else
5013 return 1;
5014 }
5015
/* Write all registers covered by REGSETS_INFO from REGCACHE back to
   the inferior, one ptrace request per regset.  For each regset the
   current kernel contents are read first and the cached registers
   overlaid on top, so kernel-side fields not tracked in the regcache
   are preserved.  Returns 0 if the general registers were among the
   regsets written, 1 otherwise.  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Read-only regsets have no fill_function and are skipped.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  /* NT_* regsets are transferred via an iovec describing the
	     buffer.  */
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      /* SPARC's ptrace swaps the last two arguments.  */
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5104
5105 #else /* !HAVE_LINUX_REGSETS */
5106
5107 #define use_linux_regsets 0
5108 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5109 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5110
5111 #endif
5112
5113 /* Return 1 if register REGNO is supported by one of the regset ptrace
5114 calls or 0 if it has to be transferred individually. */
5115
5116 static int
5117 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5118 {
5119 unsigned char mask = 1 << (regno % 8);
5120 size_t index = regno / 8;
5121
5122 return (use_linux_regsets
5123 && (regs_info->regset_bitmap == NULL
5124 || (regs_info->regset_bitmap[index] & mask) != 0));
5125 }
5126
5127 #ifdef HAVE_LINUX_USRREGS
5128
5129 static int
5130 register_addr (const struct usrregs_info *usrregs, int regnum)
5131 {
5132 int addr;
5133
5134 if (regnum < 0 || regnum >= usrregs->num_regs)
5135 error ("Invalid register number %d.", regnum);
5136
5137 addr = usrregs->regmap[regnum];
5138
5139 return addr;
5140 }
5141
5142
/* Fetch register REGNO from the inferior's USER area with
   PTRACE_PEEKUSER, one PTRACE_XFER_TYPE word at a time, and supply
   the result to REGCACHE.  Registers the low target cannot fetch, or
   with no USER-area mapping (regmap entry -1), are skipped; on a
   ptrace error the register is supplied as unavailable.  */

void
linux_process_target::fetch_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_fetch_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace transfer
     words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	{
	  /* Mark register REGNO unavailable.  */
	  supply_register (regcache, regno, NULL);
	  return;
	}
    }

  low_supply_ptrace_register (regcache, regno, buf);
}
5186
/* Write register REGNO from REGCACHE into the inferior's USER area
   with PTRACE_POKEUSER, one PTRACE_XFER_TYPE word at a time.
   Registers the low target cannot store, or with no USER-area mapping
   (regmap entry -1), are skipped.  ESRCH errors are ignored (process
   already gone); other ptrace errors raise an error unless the low
   target declares the register unstorable.  */

void
linux_process_target::store_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_store_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace transfer
     words, zero-filling the slack.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);
  memset (buf, 0, size);

  low_collect_ptrace_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	    /* Coerce to a uintptr_t first to avoid potential gcc warning
	       about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  if (!low_cannot_store_register (regno))
	    error ("writing register %d: %s", regno, safe_strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
5238 #endif /* HAVE_LINUX_USRREGS */
5239
/* Collect register REGNO from REGCACHE into BUF, in the layout ptrace
   expects.  Low targets override this when the ptrace layout differs
   from the regcache layout; the default is a plain collect.  */

void
linux_process_target::low_collect_ptrace_register (regcache *regcache,
						   int regno, char *buf)
{
  collect_register (regcache, regno, buf);
}

/* Supply register REGNO to REGCACHE from BUF, which holds the value
   in ptrace layout.  Low targets override this when the layouts
   differ; the default is a plain supply.  */

void
linux_process_target::low_supply_ptrace_register (regcache *regcache,
						  int regno, const char *buf)
{
  supply_register (regcache, regno, buf);
}
5253
/* Fetch register REGNO, or all registers if REGNO is -1, from the
   user area into REGCACHE.  When ALL is nonzero, registers that are
   also available through regsets are fetched here too; otherwise
   those are skipped.  No-op without HAVE_LINUX_USRREGS.  */

void
linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
						    regcache *regcache,
						    int regno, int all)
{
#ifdef HAVE_LINUX_USRREGS
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
	if (all || !linux_register_in_regsets (regs_info, regno))
	  fetch_register (usr, regcache, regno);
    }
  else
    fetch_register (usr, regcache, regno);
#endif
}

/* Store register REGNO, or all registers if REGNO is -1, from
   REGCACHE into the user area.  When ALL is nonzero, registers that
   are also available through regsets are stored here too.  No-op
   without HAVE_LINUX_USRREGS.  */

void
linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
						    regcache *regcache,
						    int regno, int all)
{
#ifdef HAVE_LINUX_USRREGS
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
	if (all || !linux_register_in_regsets (regs_info, regno))
	  store_register (usr, regcache, regno);
    }
  else
    store_register (usr, regcache, regno);
#endif
}
5291
/* Fetch registers (all of them if REGNO is -1) into REGCACHE.  The
   low target's low_fetch_register gets first chance; then regsets;
   registers not covered by regsets come from the user area.  */

void
linux_process_target::fetch_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      if (regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  low_fetch_register (regcache, regno);

      /* If the regset fetch reports a nonzero status, the user-area
	 path below is asked to cover regset registers as well (the
	 ALL argument).  */
      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      if (low_fetch_register (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}

/* Store registers (all of them if REGNO is -1) from REGCACHE into the
   inferior, via regsets and then the user area for anything the
   regsets did not cover.  */

void
linux_process_target::store_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      all = regsets_store_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}

/* Default implementation: the low target handles no register
   specially; return false so the generic paths above run.  */

bool
linux_process_target::low_fetch_register (regcache *regcache, int regno)
{
  return false;
}
5353
/* A wrapper for the read_memory target op, callable from plain
   functions in this file.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  return the_target->read_memory (memaddr, myaddr, len);
}
5361
5362
5363 /* Helper for read_memory/write_memory using /proc/PID/mem. Because
5364 we can use a single read/write call, this can be much more
5365 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
5366 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running threads.
5367 One an only one of READBUF and WRITEBUF is non-null. If READBUF is
5368 not null, then we're reading, otherwise we're writing. */
5369
5370 static int
5371 proc_xfer_memory (CORE_ADDR memaddr, unsigned char *readbuf,
5372 const gdb_byte *writebuf, int len)
5373 {
5374 gdb_assert ((readbuf == nullptr) != (writebuf == nullptr));
5375
5376 process_info *proc = current_process ();
5377
5378 int fd = proc->priv->mem_fd;
5379 if (fd == -1)
5380 return EIO;
5381
5382 while (len > 0)
5383 {
5384 int bytes;
5385
5386 /* If pread64 is available, use it. It's faster if the kernel
5387 supports it (only one syscall), and it's 64-bit safe even on
5388 32-bit platforms (for instance, SPARC debugging a SPARC64
5389 application). */
5390 #ifdef HAVE_PREAD64
5391 bytes = (readbuf != nullptr
5392 ? pread64 (fd, readbuf, len, memaddr)
5393 : pwrite64 (fd, writebuf, len, memaddr));
5394 #else
5395 bytes = -1;
5396 if (lseek (fd, memaddr, SEEK_SET) != -1)
5397 bytes = (readbuf != nullptr
5398 ? read (fd, readbuf, len)
5399 ? write (fd, writebuf, len));
5400 #endif
5401
5402 if (bytes < 0)
5403 return errno;
5404 else if (bytes == 0)
5405 {
5406 /* EOF means the address space is gone, the whole process
5407 exited or execed. */
5408 return EIO;
5409 }
5410
5411 memaddr += bytes;
5412 if (readbuf != nullptr)
5413 readbuf += bytes;
5414 else
5415 writebuf += bytes;
5416 len -= bytes;
5417 }
5418
5419 return 0;
5420 }
5421
/* Read LEN bytes of inferior memory at MEMADDR into MYADDR.  Returns
   0 on success or an errno value (see proc_xfer_memory).  */

int
linux_process_target::read_memory (CORE_ADDR memaddr,
				   unsigned char *myaddr, int len)
{
  return proc_xfer_memory (memaddr, myaddr, nullptr, len);
}
5428
5429 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5430 memory at MEMADDR. On failure (cannot write to the inferior)
5431 returns the value of errno. Always succeeds if LEN is zero. */
5432
5433 int
5434 linux_process_target::write_memory (CORE_ADDR memaddr,
5435 const unsigned char *myaddr, int len)
5436 {
5437 if (debug_threads)
5438 {
5439 /* Dump up to four bytes. */
5440 char str[4 * 2 + 1];
5441 char *p = str;
5442 int dump = len < 4 ? len : 4;
5443
5444 for (int i = 0; i < dump; i++)
5445 {
5446 sprintf (p, "%02x", myaddr[i]);
5447 p += 2;
5448 }
5449 *p = '\0';
5450
5451 threads_debug_printf ("Writing %s to 0x%08lx in process %d",
5452 str, (long) memaddr, current_process ()->pid);
5453 }
5454
5455 return proc_xfer_memory (memaddr, nullptr, myaddr, len);
5456 }
5457
/* Hook run when symbols become available; initializes libthread_db
   (once per process) when built with thread_db support.  */

void
linux_process_target::look_up_symbols ()
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process.  */
  if (proc->priv->thread_db != NULL)
    return;

  thread_db_init ();
#endif
}

void
linux_process_target::request_interrupt ()
{
  /* Send a SIGINT to the process group.  This acts just like the user
     typed a ^C on the controlling terminal.  */
  ::kill (-signal_pid, SIGINT);
}

bool
linux_process_target::supports_read_auxv ()
{
  return true;
}
5484
5485 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5486 to debugger memory starting at MYADDR. */
5487
5488 int
5489 linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5490 unsigned int len)
5491 {
5492 char filename[PATH_MAX];
5493 int fd, n;
5494 int pid = lwpid_of (current_thread);
5495
5496 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5497
5498 fd = open (filename, O_RDONLY);
5499 if (fd < 0)
5500 return -1;
5501
5502 if (offset != (CORE_ADDR) 0
5503 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5504 n = -1;
5505 else
5506 n = read (fd, myaddr, len);
5507
5508 close (fd);
5509
5510 return n;
5511 }
5512
/* Insert a breakpoint or watchpoint of TYPE at ADDR.  Software
   breakpoints are planted as memory breakpoints; everything else is
   delegated to the low target.  */

int
linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
				    int size, raw_breakpoint *bp)
{
  if (type == raw_bkpt_type_sw)
    return insert_memory_breakpoint (bp);
  else
    return low_insert_point (type, addr, size, bp);
}

/* Default low-target implementation.  */

int
linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}

/* Remove a breakpoint or watchpoint of TYPE at ADDR, mirroring
   insert_point above.  */

int
linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
				    int size, raw_breakpoint *bp)
{
  if (type == raw_bkpt_type_sw)
    return remove_memory_breakpoint (bp);
  else
    return low_remove_point (type, addr, size, bp);
}

/* Default low-target implementation.  */

int
linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}
5548
/* Implement the stopped_by_sw_breakpoint target_ops
   method.  */

bool
linux_process_target::stopped_by_sw_breakpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
}

/* Implement the supports_stopped_by_sw_breakpoint target_ops
   method.  */

bool
linux_process_target::supports_stopped_by_sw_breakpoint ()
{
  return USE_SIGTRAP_SIGINFO;
}

/* Implement the stopped_by_hw_breakpoint target_ops
   method.  */

bool
linux_process_target::stopped_by_hw_breakpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
}

/* Implement the supports_stopped_by_hw_breakpoint target_ops
   method.  */

bool
linux_process_target::supports_stopped_by_hw_breakpoint ()
{
  return USE_SIGTRAP_SIGINFO;
}

/* Implement the supports_hardware_single_step target_ops method.  */

bool
linux_process_target::supports_hardware_single_step ()
{
  return true;
}

/* Return true iff the current thread last stopped for a watchpoint
   hit.  */

bool
linux_process_target::stopped_by_watchpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}

/* Return the data address recorded for the last watchpoint stop of
   the current thread.  */

CORE_ADDR
linux_process_target::stopped_data_address ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stopped_data_address;
}
5612
/* This is only used for targets that define PT_TEXT_ADDR,
   PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
   the target has different ways of acquiring this information, like
   loadmaps.  */

bool
linux_process_target::supports_read_offsets ()
{
#ifdef SUPPORTS_READ_OFFSETS
  return true;
#else
  return false;
#endif
}

/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Returns 1 on success (offsets stored in *TEXT_P
   and *DATA_P), 0 if the ptrace peeks failed.  */

int
linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#ifdef SUPPORTS_READ_OFFSETS
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  /* Clear errno so a failing PTRACE_PEEKUSER (whose return value is
     ambiguous) can be detected after the three peeks below.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
#else
  gdb_assert_not_reached ("target op read_offsets not supported");
#endif
}
5668
/* TLS address lookup is only available when built with thread_db
   support.  */

bool
linux_process_target::supports_get_tls_address ()
{
#ifdef USE_THREAD_DB
  return true;
#else
  return false;
#endif
}

/* Look up the thread-local storage address for THREAD at OFFSET
   within LOAD_MODULE, storing it in *ADDRESS.  Delegates to
   thread_db; returns -1 when built without thread_db support.  */

int
linux_process_target::get_tls_address (thread_info *thread,
				       CORE_ADDR offset,
				       CORE_ADDR load_module,
				       CORE_ADDR *address)
{
#ifdef USE_THREAD_DB
  return thread_db_get_tls_address (thread, offset, load_module, address);
#else
  return -1;
#endif
}

bool
linux_process_target::supports_qxfer_osdata ()
{
  return true;
}

/* Handle qXfer:osdata reads via the common Linux implementation.
   Note WRITEBUF is ignored: osdata is read-only.  */

int
linux_process_target::qxfer_osdata (const char *annex,
				    unsigned char *readbuf,
				    unsigned const char *writebuf,
				    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
5706
/* Convert between the native siginfo layout (SIGINFO) and the
   inferior's layout (INF_SIGINFO).  DIRECTION == 1 copies INF_SIGINFO
   into SIGINFO; any other value copies SIGINFO into INF_SIGINFO.  The
   low target gets first crack via low_siginfo_fixup (e.g. for
   32-bit-inferior-under-64-bit-gdbserver layouts).  */

void
linux_process_target::siginfo_fixup (siginfo_t *siginfo,
				     gdb_byte *inf_siginfo, int direction)
{
  bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);

  /* If there was no callback, or the callback didn't do anything,
     then just do a straight memcpy.  */
  if (!done)
    {
      if (direction == 1)
	memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
      else
	memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
    }
}

/* Default low-target siginfo conversion: does nothing, and returns
   false so siginfo_fixup falls back to a plain memcpy.  */

bool
linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
					 int direction)
{
  return false;
}

bool
linux_process_target::supports_qxfer_siginfo ()
{
  return true;
}
5736
/* Handle qXfer:siginfo: read the current thread's siginfo via
   PTRACE_GETSIGINFO, convert it to the inferior's layout, then copy
   LEN bytes at OFFSET out to READBUF — or copy them in from WRITEBUF
   and flush back with PTRACE_SETSIGINFO.  Returns the number of
   bytes transferred, or -1 on error.  */

int
linux_process_target::qxfer_siginfo (const char *annex,
				     unsigned char *readbuf,
				     unsigned const char *writebuf,
				     CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  threads_debug_printf ("%s siginfo for lwp %d.",
			readbuf != NULL ? "Reading" : "Writing",
			pid);

  if (offset >= sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the object's size.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
5786
5787 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5788 so we notice when children change state; as the handler for the
5789 sigsuspend in my_waitpid. */
5790
5791 static void
5792 sigchld_handler (int signo)
5793 {
5794 int old_errno = errno;
5795
5796 if (debug_threads)
5797 {
5798 do
5799 {
5800 /* Use the async signal safe debug function. */
5801 if (debug_write ("sigchld_handler\n",
5802 sizeof ("sigchld_handler\n") - 1) < 0)
5803 break; /* just ignore */
5804 } while (0);
5805 }
5806
5807 if (target_is_async_p ())
5808 async_file_mark (); /* trigger a linux_wait */
5809
5810 errno = old_errno;
5811 }
5812
bool
linux_process_target::supports_non_stop ()
{
  return true;
}

/* Enable or disable async mode; returns the previous state.  SIGCHLD
   is blocked around the switch so sigchld_handler cannot observe the
   event pipe half set up or half torn down.  */

bool
linux_process_target::async (bool enable)
{
  bool previous = target_is_async_p ();

  threads_debug_printf ("async (%d), previous=%d",
			enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      gdb_sigmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (!linux_event_pipe.open_pipe ())
	    {
	      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe.event_fd (),
			    handle_target_event, NULL,
			    "linux-low");

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe.event_fd ());

	  linux_event_pipe.close_pipe ();
	}

      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
5865
/* Switch non-stop mode on or off.  Returns 0 on success, -1 if the
   async state could not be changed to match NONSTOP.  */

int
linux_process_target::start_non_stop (bool nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  target_async (nonstop);

  if (target_is_async_p () != (nonstop != false))
    return -1;

  return 0;
}

bool
linux_process_target::supports_multi_process ()
{
  return true;
}

/* Check if fork events are supported.  */

bool
linux_process_target::supports_fork_events ()
{
  return true;
}

/* Check if vfork events are supported.  */

bool
linux_process_target::supports_vfork_events ()
{
  return true;
}

/* Check if exec events are supported.  */

bool
linux_process_target::supports_exec_events ()
{
  return true;
}
5907
/* Target hook for 'handle_new_gdb_connection'.  Causes a reset of the
   ptrace flags for all inferiors.  This is in case the new GDB connection
   doesn't support the same set of events that the previous one did.  */

void
linux_process_target::handle_new_gdb_connection ()
{
  /* Request that all the lwps reset their ptrace options.  */
  for_each_thread ([] (thread_info *thread)
    {
      struct lwp_info *lwp = get_thread_lwp (thread);

      if (!lwp->stopped)
	{
	  /* Stop the lwp so we can modify its ptrace options.  */
	  lwp->must_set_ptrace_flags = 1;
	  linux_stop_lwp (lwp);
	}
      else
	{
	  /* Already stopped; go ahead and set the ptrace options.  */
	  struct process_info *proc = find_process_pid (pid_of (thread));
	  int options = linux_low_ptrace_options (proc->attached);

	  linux_enable_event_reporting (lwpid_of (thread), options);
	  lwp->must_set_ptrace_flags = 0;
	}
    });
}
5937
/* Pass "monitor" commands through to libthread_db when built with
   thread_db support; otherwise report them as unhandled (0).  */

int
linux_process_target::handle_monitor_command (char *mon)
{
#ifdef USE_THREAD_DB
  return thread_db_handle_monitor_command (mon);
#else
  return 0;
#endif
}

/* Return the core the thread PTID last ran on.  */

int
linux_process_target::core_of_thread (ptid_t ptid)
{
  return linux_common_core_of_thread (ptid);
}

bool
linux_process_target::supports_disable_randomization ()
{
  return true;
}

bool
linux_process_target::supports_agent ()
{
  return true;
}

/* Range stepping is available either via software single-step, or
   natively from the low target.  */

bool
linux_process_target::supports_range_stepping ()
{
  if (supports_software_single_step ())
    return true;

  return low_supports_range_stepping ();
}

/* Default: no native range stepping support.  */

bool
linux_process_target::low_supports_range_stepping ()
{
  return false;
}

bool
linux_process_target::supports_pid_to_exec_file ()
{
  return true;
}

/* Return the filename of PID's executable, via /proc.  */

const char *
linux_process_target::pid_to_exec_file (int pid)
{
  return linux_proc_pid_to_exec_file (pid);
}

bool
linux_process_target::supports_multifs ()
{
  return true;
}

/* open(2) on behalf of PID (delegated to the mount-namespace-aware
   helper), with close-on-exec set.  */

int
linux_process_target::multifs_open (int pid, const char *filename,
				    int flags, mode_t mode)
{
  return linux_mntns_open_cloexec (pid, filename, flags, mode);
}

/* unlink(2) on behalf of PID.  */

int
linux_process_target::multifs_unlink (int pid, const char *filename)
{
  return linux_mntns_unlink (pid, filename);
}

/* readlink(2) on behalf of PID.  */

ssize_t
linux_process_target::multifs_readlink (int pid, const char *filename,
					char *buf, size_t bufsiz)
{
  return linux_mntns_readlink (pid, filename, buf, bufsiz);
}
6018
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
/* One entry of a loadmap, as returned by the PT_GETDSBT /
   PTRACE_GETFDPIC ptrace requests.  */
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP		PT_GETDSBT
# define LINUX_LOADMAP_EXEC	PTRACE_GETDSBT_EXEC
# define LINUX_LOADMAP_INTERP	PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP		PTRACE_GETFDPIC
# define LINUX_LOADMAP_EXEC	PTRACE_GETFDPIC_EXEC
# define LINUX_LOADMAP_INTERP	PTRACE_GETFDPIC_INTERP
# endif

bool
linux_process_target::supports_read_loadmap ()
{
  return true;
}

/* Copy up to LEN bytes of the loadmap selected by ANNEX ("exec" or
   "interp"), starting at OFFSET, into MYADDR.  Returns the number of
   bytes copied, or -1 on error.  */

int
linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
				    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  /* NOTE(review): if CORE_ADDR is an unsigned type, the "offset < 0"
     arm can never be true; the upper-bound check is the effective
     one — confirm before relying on it.  */
  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6100
/* Catching syscalls is available only if the low target supports
   it.  */

bool
linux_process_target::supports_catch_syscall ()
{
  return low_supports_catch_syscall ();
}

/* Default: no syscall catching support.  */

bool
linux_process_target::low_supports_catch_syscall ()
{
  return false;
}

/* Read the program counter from REGCACHE, or 0 when the low target
   has no breakpoint (PC) support.  */

CORE_ADDR
linux_process_target::read_pc (regcache *regcache)
{
  if (!low_supports_breakpoints ())
    return 0;

  return low_get_pc (regcache);
}

/* Write PC into REGCACHE; requires low breakpoint support.  */

void
linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (low_supports_breakpoints ());

  low_set_pc (regcache, pc);
}

bool
linux_process_target::supports_thread_stopped ()
{
  return true;
}

/* Return true iff THREAD's lwp is currently stopped.  */

bool
linux_process_target::thread_stopped (thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}

/* This exposes stop-all-threads functionality to other modules.  */

void
linux_process_target::pause_all (bool freeze)
{
  stop_all_lwps (freeze, NULL);
}

/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  */

void
linux_process_target::unpause_all (bool unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}
6158
/* Extract &phdr and num_phdr in the inferior.  Return 0 on success,
   nonzero on failure (1: cannot open /proc/PID/auxv; 2: AT_PHDR or
   AT_PHNUM missing).  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  /* Scan auxv entries until both AT_PHDR and AT_PHNUM have been
     seen, or the vector ends.  */
  *phdr_memaddr = 0;
  *num_phdr = 0;
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
6224
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  Found via PT_PHDR: the runtime address of
     the program headers minus the p_vaddr they were linked at.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  /* With the relocation known, scan the program headers again for
     PT_DYNAMIC and return its relocated address.  */
  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
6298
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  /* Walk the .dynamic section entry by entry until DT_NULL, checking
     the MIPS rld_map variants first, then falling back to DT_DEBUG.  */
  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  /* DT_MIPS_RLD_MAP holds the absolute address of a word
	     containing the map pointer.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  /* DT_MIPS_RLD_MAP_REL holds an offset relative to the
	     dynamic entry itself.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
6400
/* Read one pointer of PTR_SIZE bytes from MEMADDR in the inferior
   into *PTR.  Returns 0 on success, nonzero on read failure.  */

static int
read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
{
  int ret;

  /* Go through a union so this works on either big or little endian
     hosts, when the inferior's pointer size is smaller than the size
     of CORE_ADDR.  It is assumed the inferior's endianness is the
     same of the superior's.  */
  union
  {
    CORE_ADDR core_addr;
    unsigned int ui;
    unsigned char uc;
  } addr;

  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
  if (ret == 0)
    {
      if (ptr_size == sizeof (CORE_ADDR))
	*ptr = addr.core_addr;
      else if (ptr_size == sizeof (unsigned int))
	*ptr = addr.ui;
      else
	gdb_assert_not_reached ("unhandled pointer size");
    }
  return ret;
}
6431
/* Return true: this target implements the qXfer:libraries-svr4:read
   packet (handled by qxfer_libraries_svr4 below).  */

bool
linux_process_target::supports_qxfer_libraries_svr4 ()
{
  return true;
}
6437
/* Byte offsets of the fields gdbserver reads from the inferior's
   `struct r_debug' / `r_debug_extended' and `struct link_map' (the
   SVR4 / glibc run-time linker interface).  Concrete values for
   32-bit and 64-bit inferiors are given by lmo_32bit_offsets and
   lmo_64bit_offsets below; keep the field order in sync with those
   positional initializers.  */

struct link_map_offsets
{
  /* Offset and size of r_debug.r_version. */
  int r_version_offset;

  /* Offset and size of r_debug.r_map. */
  int r_map_offset;

  /* Offset of r_debug_extended.r_next. */
  int r_next_offset;

  /* Offset to l_addr field in struct link_map. */
  int l_addr_offset;

  /* Offset to l_name field in struct link_map. */
  int l_name_offset;

  /* Offset to l_ld field in struct link_map. */
  int l_ld_offset;

  /* Offset to l_next field in struct link_map. */
  int l_next_offset;

  /* Offset to l_prev field in struct link_map. */
  int l_prev_offset;
};
6464
/* Field offsets for an inferior with 32-bit pointers (see
   link_map_offsets above for what each entry means).  */

static const link_map_offsets lmo_32bit_offsets =
  {
    0,     /* r_version offset. */
    4,     /* r_debug.r_map offset. */
    20,    /* r_debug_extended.r_next. */
    0,     /* l_addr offset in link_map. */
    4,     /* l_name offset in link_map. */
    8,     /* l_ld offset in link_map. */
    12,    /* l_next offset in link_map. */
    16     /* l_prev offset in link_map. */
  };
6476
/* Field offsets for an inferior with 64-bit pointers (see
   link_map_offsets above for what each entry means).  */

static const link_map_offsets lmo_64bit_offsets =
  {
    0,     /* r_version offset. */
    8,     /* r_debug.r_map offset. */
    40,    /* r_debug_extended.r_next. */
    0,     /* l_addr offset in link_map. */
    8,     /* l_name offset in link_map. */
    16,    /* l_ld offset in link_map. */
    24,    /* l_next offset in link_map. */
    32     /* l_prev offset in link_map. */
  };
6488
6489 /* Get the loaded shared libraries from one namespace. */
6490
6491 static void
6492 read_link_map (std::string &document, CORE_ADDR lm_addr, CORE_ADDR lm_prev,
6493 int ptr_size, const link_map_offsets *lmo, bool ignore_first,
6494 int &header_done)
6495 {
6496 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6497
6498 while (lm_addr
6499 && read_one_ptr (lm_addr + lmo->l_name_offset,
6500 &l_name, ptr_size) == 0
6501 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6502 &l_addr, ptr_size) == 0
6503 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6504 &l_ld, ptr_size) == 0
6505 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6506 &l_prev, ptr_size) == 0
6507 && read_one_ptr (lm_addr + lmo->l_next_offset,
6508 &l_next, ptr_size) == 0)
6509 {
6510 unsigned char libname[PATH_MAX];
6511
6512 if (lm_prev != l_prev)
6513 {
6514 warning ("Corrupted shared library list: 0x%s != 0x%s",
6515 paddress (lm_prev), paddress (l_prev));
6516 break;
6517 }
6518
6519 /* Ignore the first entry even if it has valid name as the first entry
6520 corresponds to the main executable. The first entry should not be
6521 skipped if the dynamic loader was loaded late by a static executable
6522 (see solib-svr4.c parameter ignore_first). But in such case the main
6523 executable does not have PT_DYNAMIC present and this function already
6524 exited above due to failed get_r_debug. */
6525 if (ignore_first && lm_prev == 0)
6526 string_appendf (document, " main-lm=\"0x%s\"", paddress (lm_addr));
6527 else
6528 {
6529 /* Not checking for error because reading may stop before
6530 we've got PATH_MAX worth of characters. */
6531 libname[0] = '\0';
6532 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6533 libname[sizeof (libname) - 1] = '\0';
6534 if (libname[0] != '\0')
6535 {
6536 if (!header_done)
6537 {
6538 /* Terminate `<library-list-svr4'. */
6539 document += '>';
6540 header_done = 1;
6541 }
6542
6543 string_appendf (document, "<library name=\"");
6544 xml_escape_text_append (&document, (char *) libname);
6545 string_appendf (document, "\" lm=\"0x%s\" l_addr=\"0x%s\" "
6546 "l_ld=\"0x%s\"/>",
6547 paddress (lm_addr), paddress (l_addr),
6548 paddress (l_ld));
6549 }
6550 }
6551
6552 lm_prev = lm_addr;
6553 lm_addr = l_next;
6554 }
6555 }
6556
/* Construct qXfer:libraries-svr4:read reply.

   ANNEX optionally carries "start=HEXADDR" (link map entry to begin
   the walk at) and "prev=HEXADDR" (expected l_prev of that entry) as
   ';'-separated NAME=VALUE pairs.  The reply slice [OFFSET,
   OFFSET+LEN) of the generated XML document is copied into READBUF.
   Returns the number of bytes copied, -1 on error, or -2 because
   writing this object is not supported.  */

int
linux_process_target::qxfer_libraries_svr4 (const char *annex,
					    unsigned char *readbuf,
					    unsigned const char *writebuf,
					    CORE_ADDR offset, int len)
{
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;
  unsigned int machine;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  int header_done = 0;

  /* This object is read-only.  */
  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Pick the 32- or 64-bit layout based on the inferior's
     executable.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  const link_map_offsets *lmo;
  int ptr_size;
  if (is_elf64)
    {
      lmo = &lmo_64bit_offsets;
      ptr_size = 8;
    }
  else
    {
      lmo = &lmo_32bit_offsets;
      ptr_size = 4;
    }

  /* Parse the annex: a sequence of NAME=HEXADDR pairs separated by
     ';'.  Only "start" and "prev" are recognized; other pairs are
     skipped.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int name_len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      name_len = sep - annex;
      if (name_len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (name_len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  /* Unknown name: skip to just past the next ';'.  */
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  std::string document = "<library-list-svr4 version=\"1.0\"";

  /* When the starting LM_ADDR is passed in the annex, only traverse that
     namespace.

     Otherwise, start with R_DEBUG and traverse all namespaces we find.  */
  if (lm_addr != 0)
    read_link_map (document, lm_addr, lm_prev, ptr_size, lmo, false,
		   header_done);
  else
    {
      if (lm_prev != 0)
	warning ("ignoring prev=0x%s without start", paddress (lm_prev));

      /* The r_debug address is cached in the per-process private
	 data; compute it once per inferior.  */
      CORE_ADDR r_debug = priv->r_debug;
      if (r_debug == 0)
	r_debug = priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (r_debug == (CORE_ADDR) -1)
	return -1;

      /* Walk the chain of r_debug structures, one per namespace.  The
	 chain via r_next only exists for r_version >= 2
	 (r_debug_extended).  */
      bool ignore_first = true;
      while (r_debug != 0)
	{
	  int r_version = 0;
	  if (linux_read_memory (r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0)
	    {
	      warning ("unable to read r_version from 0x%s",
		       paddress (r_debug + lmo->r_version_offset));
	      break;
	    }

	  if (r_version < 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	      break;
	    }

	  if (read_one_ptr (r_debug + lmo->r_map_offset, &lm_addr,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%s",
		       paddress (r_debug + lmo->r_map_offset));
	      break;
	    }

	  read_link_map (document, lm_addr, 0, ptr_size, lmo,
			 ignore_first, header_done);

	  if (r_version < 2)
	    break;

	  /* Only applies to the default namespace.  */
	  ignore_first = false;

	  if (read_one_ptr (r_debug + lmo->r_next_offset, &r_debug,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_next from 0x%s",
		       paddress (r_debug + lmo->r_next_offset));
	      break;
	    }
	}
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      document += "/>";
    }
  else
    document += "</library-list-svr4>";

  /* Hand back the [OFFSET, OFFSET+LEN) slice the client asked for,
     clamped to the document size.  */
  int document_len = document.length ();
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document.data () + offset, len);

  return len;
}
6710
6711 #ifdef HAVE_LINUX_BTRACE
6712
/* Enable branch tracing for thread TP with configuration CONF;
   forwards to the nat-layer linux_enable_btrace.  */

btrace_target_info *
linux_process_target::enable_btrace (thread_info *tp,
				     const btrace_config *conf)
{
  return linux_enable_btrace (tp->id, conf);
}
6719
6720 /* See to_disable_btrace target method. */
6721
6722 int
6723 linux_process_target::disable_btrace (btrace_target_info *tinfo)
6724 {
6725 enum btrace_error err;
6726
6727 err = linux_disable_btrace (tinfo);
6728 return (err == BTRACE_ERR_NONE ? 0 : -1);
6729 }
6730
6731 /* Encode an Intel Processor Trace configuration. */
6732
6733 static void
6734 linux_low_encode_pt_config (struct buffer *buffer,
6735 const struct btrace_data_pt_config *config)
6736 {
6737 buffer_grow_str (buffer, "<pt-config>\n");
6738
6739 switch (config->cpu.vendor)
6740 {
6741 case CV_INTEL:
6742 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6743 "model=\"%u\" stepping=\"%u\"/>\n",
6744 config->cpu.family, config->cpu.model,
6745 config->cpu.stepping);
6746 break;
6747
6748 default:
6749 break;
6750 }
6751
6752 buffer_grow_str (buffer, "</pt-config>\n");
6753 }
6754
6755 /* Encode a raw buffer. */
6756
6757 static void
6758 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6759 unsigned int size)
6760 {
6761 if (size == 0)
6762 return;
6763
6764 /* We use hex encoding - see gdbsupport/rsp-low.h. */
6765 buffer_grow_str (buffer, "<raw>\n");
6766
6767 while (size-- > 0)
6768 {
6769 char elem[2];
6770
6771 elem[0] = tohex ((*data >> 4) & 0xf);
6772 elem[1] = tohex (*data++ & 0xf);
6773
6774 buffer_grow (buffer, elem, 2);
6775 }
6776
6777 buffer_grow_str (buffer, "</raw>\n");
6778 }
6779
/* See to_read_btrace target method.

   Read branch trace data of kind TYPE for TINFO and render it as XML
   into BUFFER.  Returns 0 on success; on failure an "E.<reason>"
   string is placed in BUFFER and -1 is returned.  */

int
linux_process_target::read_btrace (btrace_target_info *tinfo,
				   buffer *buffer,
				   enum btrace_read_type type)
{
  struct btrace_data btrace;
  enum btrace_error err;

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      return -1;
    }

  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      buffer_grow_str0 (buffer, "E.No Trace.");
      return -1;

    case BTRACE_FORMAT_BTS:
      /* Branch Trace Store: one <block> element per recorded block
	 of sequential execution.  */
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

      for (const btrace_block &block : *btrace.variant.bts.blocks)
	buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			   paddress (block.begin), paddress (block.end));

      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    case BTRACE_FORMAT_PT:
      /* Intel Processor Trace: emit the configuration followed by
	 the raw trace bytes, hex-encoded.  */
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
      buffer_grow_str (buffer, "<pt>\n");

      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      buffer_grow_str (buffer, "</pt>\n");
      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    default:
      buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
      return -1;
    }

  return 0;
}
6839
/* See to_btrace_conf target method.

   Render the branch tracing configuration of TINFO as a
   <btrace-conf> XML document into BUFFER.  Always returns 0; an
   unknown or NONE format simply produces an empty document.  */

int
linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
					buffer *buffer)
{
  const struct btrace_config *conf;

  buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
  buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");

  conf = linux_btrace_conf (tinfo);
  if (conf != NULL)
    {
      switch (conf->format)
	{
	case BTRACE_FORMAT_NONE:
	  break;

	case BTRACE_FORMAT_BTS:
	  buffer_xml_printf (buffer, "<bts");
	  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
	  buffer_xml_printf (buffer, " />\n");
	  break;

	case BTRACE_FORMAT_PT:
	  buffer_xml_printf (buffer, "<pt");
	  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
	  buffer_xml_printf (buffer, "/>\n");
	  break;
	}
    }

  buffer_grow_str0 (buffer, "</btrace-conf>\n");
  return 0;
}
6876 #endif /* HAVE_LINUX_BTRACE */
6877
/* See nat/linux-nat.h.

   Return the ptid of the LWP the current thread refers to.  */

ptid_t
current_lwp_ptid (void)
{
  return ptid_of (current_thread);
}
6885
/* Return the name of thread THREAD, as read via /proc by
   linux_proc_tid_get_name.  */

const char *
linux_process_target::thread_name (ptid_t thread)
{
  return linux_proc_tid_get_name (thread);
}
6891
6892 #if USE_THREAD_DB
/* Fetch the thread handle for PTID into *HANDLE / *HANDLE_LEN;
   forwards to libthread_db via thread_db_thread_handle.  */
bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  return thread_db_thread_handle (ptid, handle, handle_len);
}
6899 #endif
6900
6901 thread_info *
6902 linux_process_target::thread_pending_parent (thread_info *thread)
6903 {
6904 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
6905
6906 if (parent == nullptr)
6907 return nullptr;
6908
6909 return get_lwp_thread (parent);
6910 }
6911
6912 thread_info *
6913 linux_process_target::thread_pending_child (thread_info *thread)
6914 {
6915 lwp_info *child = get_thread_lwp (thread)->pending_child ();
6916
6917 if (child == nullptr)
6918 return nullptr;
6919
6920 return get_lwp_thread (child);
6921 }
6922
/* Default implementation of linux_target_ops method "set_pc" for
   32-bit pc register which is literally named "pc".  PC is truncated
   to 32 bits before being written to the register cache.  */

void
linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
{
  uint32_t newpc = pc;

  supply_register_by_name (regcache, "pc", &newpc);
}
6933
/* Default implementation of linux_target_ops method "get_pc" for
   32-bit pc register which is literally named "pc".  The 32-bit
   value is zero-extended to CORE_ADDR on return.  */

CORE_ADDR
linux_get_pc_32bit (struct regcache *regcache)
{
  uint32_t pc;

  collect_register_by_name (regcache, "pc", &pc);
  threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
  return pc;
}
6946
/* Default implementation of linux_target_ops method "set_pc" for
   64-bit pc register which is literally named "pc".  */

void
linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
{
  uint64_t newpc = pc;

  supply_register_by_name (regcache, "pc", &newpc);
}
6957
/* Default implementation of linux_target_ops method "get_pc" for
   64-bit pc register which is literally named "pc".  */

CORE_ADDR
linux_get_pc_64bit (struct regcache *regcache)
{
  uint64_t pc;

  collect_register_by_name (regcache, "pc", &pc);
  threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
  return pc;
}
6970
6971 /* See linux-low.h. */
6972
6973 int
6974 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
6975 {
6976 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
6977 int offset = 0;
6978
6979 gdb_assert (wordsize == 4 || wordsize == 8);
6980
6981 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
6982 {
6983 if (wordsize == 4)
6984 {
6985 uint32_t *data_p = (uint32_t *) data;
6986 if (data_p[0] == match)
6987 {
6988 *valp = data_p[1];
6989 return 1;
6990 }
6991 }
6992 else
6993 {
6994 uint64_t *data_p = (uint64_t *) data;
6995 if (data_p[0] == match)
6996 {
6997 *valp = data_p[1];
6998 return 1;
6999 }
7000 }
7001
7002 offset += 2 * wordsize;
7003 }
7004
7005 return 0;
7006 }
7007
/* See linux-low.h.

   Return the AT_HWCAP entry from the inferior's auxv, or 0 when the
   entry is absent (linux_get_auxv leaves HWCAP untouched then).  */

CORE_ADDR
linux_get_hwcap (int wordsize)
{
  CORE_ADDR hwcap = 0;
  linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
  return hwcap;
}
7017
/* See linux-low.h.

   Return the AT_HWCAP2 entry from the inferior's auxv, or 0 when the
   entry is absent.  */

CORE_ADDR
linux_get_hwcap2 (int wordsize)
{
  CORE_ADDR hwcap2 = 0;
  linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
  return hwcap2;
}
7027
7028 #ifdef HAVE_LINUX_REGSETS
7029 void
7030 initialize_regsets_info (struct regsets_info *info)
7031 {
7032 for (info->num_regsets = 0;
7033 info->regsets[info->num_regsets].size >= 0;
7034 info->num_regsets++)
7035 ;
7036 }
7037 #endif
7038
/* One-time initialization of the Linux low target: register the
   target vector, emit any ptrace/proc capability warnings, install
   the SIGCHLD handler, and run architecture- and feature-detection
   setup.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (the_linux_target);

  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();

  /* Install sigchld_handler with SA_RESTART so interrupted syscalls
     are restarted, and no other signals blocked while it runs.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  linux_check_ptrace_features ();
}