]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdbserver/linux-low.cc
Introduce GDB_THREAD_OPTION_EXIT thread option, fix step-over-thread-exit
[thirdparty/binutils-gdb.git] / gdbserver / linux-low.cc
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
213516ef 2 Copyright (C) 1995-2023 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
268a13a5 22#include "gdbsupport/agent.h"
de0d863e 23#include "tdesc.h"
cdc8e9b2
JB
24#include "gdbsupport/event-loop.h"
25#include "gdbsupport/event-pipe.h"
268a13a5
TT
26#include "gdbsupport/rsp-low.h"
27#include "gdbsupport/signals-state-save-restore.h"
96d7229d
LM
28#include "nat/linux-nat.h"
29#include "nat/linux-waitpid.h"
268a13a5 30#include "gdbsupport/gdb_wait.h"
5826e159 31#include "nat/gdb_ptrace.h"
125f8a3d
GB
32#include "nat/linux-ptrace.h"
33#include "nat/linux-procfs.h"
8cc73a39 34#include "nat/linux-personality.h"
da6d8c04
DJ
35#include <signal.h>
36#include <sys/ioctl.h>
37#include <fcntl.h>
0a30fbc4 38#include <unistd.h>
fd500816 39#include <sys/syscall.h>
f9387fc3 40#include <sched.h>
07e059b5
VP
41#include <ctype.h>
42#include <pwd.h>
43#include <sys/types.h>
44#include <dirent.h>
53ce3c39 45#include <sys/stat.h>
efcbbd14 46#include <sys/vfs.h>
1570b33e 47#include <sys/uio.h>
268a13a5 48#include "gdbsupport/filestuff.h"
c144c7a0 49#include "tracepoint.h"
276d4552 50#include <inttypes.h>
268a13a5 51#include "gdbsupport/common-inferior.h"
2090129c 52#include "nat/fork-inferior.h"
268a13a5 53#include "gdbsupport/environ.h"
21987b9c 54#include "gdbsupport/gdb-sigmask.h"
268a13a5 55#include "gdbsupport/scoped_restore.h"
957f3f49
DE
56#ifndef ELFMAG0
57/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
58 then ELFMAG0 will have been defined. If it didn't get included by
59 gdb_proc_service.h then including it will likely introduce a duplicate
60 definition of elf_fpregset_t. */
61#include <elf.h>
62#endif
14d2069a 63#include "nat/linux-namespaces.h"
efcbbd14 64
fd462a61
DJ
65#ifndef O_LARGEFILE
66#define O_LARGEFILE 0
67#endif
1a981360 68
69f4c9cc
AH
69#ifndef AT_HWCAP2
70#define AT_HWCAP2 26
71#endif
72
db0dfaa0
LM
73/* Some targets did not define these ptrace constants from the start,
74 so gdbserver defines them locally here. In the future, these may
75 be removed after they are added to asm/ptrace.h. */
76#if !(defined(PT_TEXT_ADDR) \
77 || defined(PT_DATA_ADDR) \
78 || defined(PT_TEXT_END_ADDR))
79#if defined(__mcoldfire__)
80/* These are still undefined in 3.10 kernels. */
81#define PT_TEXT_ADDR 49*4
82#define PT_DATA_ADDR 50*4
83#define PT_TEXT_END_ADDR 51*4
db0dfaa0
LM
84/* These are still undefined in 3.10 kernels. */
85#elif defined(__TMS320C6X__)
86#define PT_TEXT_ADDR (0x10000*4)
87#define PT_DATA_ADDR (0x10004*4)
88#define PT_TEXT_END_ADDR (0x10008*4)
89#endif
90#endif
91
5203ae1e
TBA
92#if (defined(__UCLIBC__) \
93 && defined(HAS_NOMMU) \
94 && defined(PT_TEXT_ADDR) \
95 && defined(PT_DATA_ADDR) \
96 && defined(PT_TEXT_END_ADDR))
97#define SUPPORTS_READ_OFFSETS
98#endif
99
9accd112 100#ifdef HAVE_LINUX_BTRACE
125f8a3d 101# include "nat/linux-btrace.h"
268a13a5 102# include "gdbsupport/btrace-common.h"
9accd112
MM
103#endif
104
/* Fallback definitions of the ELF auxiliary-vector entry types, for
   hosts whose <elf.h> does not provide them.  */

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We use to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We use to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
/* Does the current host support PTRACE_GETREGSET?
   -1 means "not yet determined"; presumably probed lazily at runtime
   elsewhere in this file -- confirm against users of this flag.  */
int have_ptrace_getregset = -1;
8a841a35
PA
138/* Return TRUE if THREAD is the leader thread of the process. */
139
140static bool
141is_leader (thread_info *thread)
142{
143 ptid_t ptid = ptid_of (thread);
144 return ptid.pid () == ptid.lwp ();
145}
146
/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  /* The ptid lives on the LWP's associated thread object.  */
  return ptid_of (get_lwp_thread (lwp));
}
/* See nat/linux-nat.h.  */

/* Store the architecture-specific per-LWP data INFO on LWP.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}
/* See nat/linux-nat.h.  */

/* Return the architecture-specific per-LWP data stored on LWP.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}
/* See nat/linux-nat.h.  */

/* Return non-zero if LWP is currently stopped.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}
/* See nat/linux-nat.h.  */

/* Return the reason LWP last stopped.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}
/* See nat/linux-nat.h.  */

/* Return non-zero if LWP is being single-stepped.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the list of stopped-but-unclaimed PIDs.  */
static struct simple_pid_list *stopped_pids;
05044653
PA
214
215/* Trivial list manipulation functions to keep track of a list of new
216 stopped processes. */
217
218static void
219add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
220{
8d749320 221 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
222
223 new_pid->pid = pid;
224 new_pid->status = status;
225 new_pid->next = *listp;
226 *listp = new_pid;
227}
228
229static int
230pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
231{
232 struct simple_pid_list **p;
233
234 for (p = listp; *p != NULL; p = &(*p)->next)
235 if ((*p)->pid == pid)
236 {
237 struct simple_pid_list *next = (*p)->next;
238
239 *statusp = (*p)->status;
240 xfree (*p);
241 *p = next;
242 return 1;
243 }
244 return 0;
245}
24a09b5f 246
enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* Forward declarations for helpers defined later in this file.  */
static void unsuspend_all_lwps (struct lwp_info *except);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
static ptid_t step_over_bkpt;
/* Default implementation: the low target does not support the
   breakpoint-related PC accessors (see low_get_pc/low_set_pc);
   architectures override this to return true.  */

bool
linux_process_target::low_supports_breakpoints ()
{
  return false;
}
/* Default implementation of reading the PC from REGCACHE; only
   meaningful when low_supports_breakpoints returns true.  */

CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  return 0;
}
/* Write NEWPC as the PC into REGCACHE.  Must be overridden by any
   architecture whose low_supports_breakpoints returns true.  */

void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}
/* Return the possible next PCs after the current instruction, for
   software single-step.  Must be overridden by targets that support
   software single-stepping.  */

std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
			  "implemented");
}
/* Number of bytes the PC advances past a software breakpoint
   instruction after it traps.  Default 0; architectures where the
   trap leaves the PC after the breakpoint insn override this.  */

int
linux_process_target::low_decr_pc_after_break ()
{
  return 0;
}
c2d6af84
PA
312/* True if LWP is stopped in its stepping range. */
313
314static int
315lwp_in_step_range (struct lwp_info *lwp)
316{
317 CORE_ADDR pc = lwp->stop_pc;
318
319 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
320}
321
/* The event pipe registered as a waitable file in the event loop.  */
static event_pipe linux_event_pipe;

/* True if we're currently in async mode -- i.e. the event pipe has
   been opened.  */
#define target_is_async_p() (linux_event_pipe.is_open ())

static void send_sigstop (struct lwp_info *lwp);
/* Examine the ELF header HEADER.  Store the machine type in *MACHINE.
   Return 1 if it describes a 64-bit ELF object, 0 if it is a valid
   ELF object of another class, and -1 if the magic bytes are wrong
   (in which case *MACHINE is set to EM_NONE).  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  const unsigned char *ident = header->e_ident;

  if (ident[EI_MAG0] != ELFMAG0
      || ident[EI_MAG1] != ELFMAG1
      || ident[EI_MAG2] != ELFMAG2
      || ident[EI_MAG3] != ELFMAG3)
    {
      /* Not an ELF file at all.  */
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return ident[EI_CLASS] == ELFCLASS64;
}
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.
   On success *MACHINE receives the ELF machine type.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int result;

  int fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  /* A short read means the file cannot be a valid ELF object.  */
  if (read (fd, &header, sizeof (header)) != sizeof (header))
    result = 0;
  else
    result = elf_64_header_p (&header, machine);

  close (fd);
  return result;
}
be07f1a2
PA
372/* Accepts an integer PID; Returns true if the executable PID is
373 running is a 64-bit ELF file.. */
374
375int
214d508e 376linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 377{
d8d2a3ee 378 char file[PATH_MAX];
be07f1a2
PA
379
380 sprintf (file, "/proc/%d/exe", pid);
214d508e 381 return elf_64_file_p (file, machine);
be07f1a2
PA
382}
383
fd000fb3
TBA
384void
385linux_process_target::delete_lwp (lwp_info *lwp)
bd99dc85 386{
fa96cb38
PA
387 struct thread_info *thr = get_lwp_thread (lwp);
388
c058728c 389 threads_debug_printf ("deleting %ld", lwpid_of (thr));
fa96cb38
PA
390
391 remove_thread (thr);
466eecee 392
fd000fb3 393 low_delete_thread (lwp->arch_private);
466eecee 394
013e3554 395 delete lwp;
bd99dc85
PA
396}
397
/* Release the architecture-private per-thread data INFO.  */

void
linux_process_target::low_delete_thread (arch_lwp_info *info)
{
  /* Default implementation should be overridden if architecture-specific
     info is being used.  */
  gdb_assert (info == nullptr);
}
421490af
PA
406/* Open the /proc/PID/mem file for PROC. */
407
408static void
409open_proc_mem_file (process_info *proc)
410{
411 gdb_assert (proc->priv->mem_fd == -1);
412
413 char filename[64];
414 xsnprintf (filename, sizeof filename, "/proc/%d/mem", proc->pid);
415
416 proc->priv->mem_fd
417 = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
418}
419
fd000fb3 420process_info *
421490af 421linux_process_target::add_linux_process_no_mem_file (int pid, int attached)
95954743
PA
422{
423 struct process_info *proc;
424
95954743 425 proc = add_process (pid, attached);
8d749320 426 proc->priv = XCNEW (struct process_info_private);
95954743 427
fd000fb3 428 proc->priv->arch_private = low_new_process ();
421490af
PA
429 proc->priv->mem_fd = -1;
430
431 return proc;
432}
433
aa5ca48f 434
421490af
PA
435process_info *
436linux_process_target::add_linux_process (int pid, int attached)
437{
438 process_info *proc = add_linux_process_no_mem_file (pid, attached);
439 open_proc_mem_file (proc);
95954743
PA
440 return proc;
441}
442
f551c8ef
SM
443void
444linux_process_target::remove_linux_process (process_info *proc)
445{
446 if (proc->priv->mem_fd >= 0)
447 close (proc->priv->mem_fd);
448
449 this->low_delete_process (proc->priv->arch_private);
450
451 xfree (proc->priv);
452 proc->priv = nullptr;
453
454 remove_process (proc);
455}
456
/* Allocate architecture-specific per-process data.  Default: none.  */

arch_process_info *
linux_process_target::low_new_process ()
{
  return nullptr;
}
/* Release architecture-specific per-process data INFO.  */

void
linux_process_target::low_delete_process (arch_process_info *info)
{
  /* Default implementation must be overridden if architecture-specific
     info exists.  */
  gdb_assert (info == nullptr);
}
/* Hook for architectures to copy per-process data from PARENT to
   CHILD at fork time.  */

void
linux_process_target::low_new_fork (process_info *parent, process_info *child)
{
  /* Nop.  */
}
797bcff5
TBA
477void
478linux_process_target::arch_setup_thread (thread_info *thread)
94585166 479{
24583e45
TBA
480 scoped_restore_current_thread restore_thread;
481 switch_to_thread (thread);
94585166 482
797bcff5 483 low_arch_setup ();
94585166
DB
484}
485
/* Handle the extended ptrace event encoded in wait status WSTAT for
   the LWP *ORIG_EVENT_LWP (fork/vfork/clone/vfork-done/exec).  On an
   exec, *ORIG_EVENT_LWP is replaced by the new LWP.  Return 0 if the
   event should be reported to GDB, 1 if it was handled internally
   (e.g. a clone GDB did not ask to hear about).  */

int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
					    int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);

  gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (debug_threads)
	{
	  debug_printf ("HEW: Got %s event from LWP %ld, new child is %ld\n",
			(event == PTRACE_EVENT_FORK ? "fork"
			 : event == PTRACE_EVENT_VFORK ? "vfork"
			 : event == PTRACE_EVENT_CLONE ? "clone"
			 : "???"),
			ptid_of (event_thr).lwp (),
			new_pid);
	}

      /* A fork/vfork creates a new process (pid == lwp); a clone
	 creates a new thread inside the parent's process.  */
      ptid_t child_ptid = (event != PTRACE_EVENT_CLONE
			   ? ptid_t (new_pid, new_pid)
			   : ptid_t (ptid_of (event_thr).pid (), new_pid));

      lwp_info *child_lwp = add_lwp (child_ptid);
      gdb_assert (child_lwp != NULL);
      child_lwp->stopped = 1;
      if (event != PTRACE_EVENT_CLONE)
	child_lwp->must_set_ptrace_flags = 1;
      child_lwp->status_pending_p = 0;

      thread_info *child_thr = get_lwp_thread (child_lwp);

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	{
	  threads_debug_printf ("leaving child suspended");
	  child_lwp->suspended = 1;
	}

      if (event_lwp->bp_reinsert != 0
	  && supports_software_single_step ()
	  && event == PTRACE_EVENT_VFORK)
	{
	  /* If we leave single-step breakpoints there, child will
	     hit it, so uninsert single-step breakpoints from parent
	     (and child).  Once vfork child is done, reinsert
	     them back to parent.  */
	  uninsert_single_step_breakpoints (event_thr);
	}

      if (event != PTRACE_EVENT_CLONE)
	{
	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  process_info *child_proc = add_linux_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);

	  process_info *parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  clone_all_breakpoints (child_thr, event_thr);

	  target_desc_up tdesc = allocate_target_description ();
	  copy_target_description (tdesc.get (), parent_proc->tdesc);
	  child_proc->tdesc = tdesc.release ();

	  /* Clone arch-specific process data.  */
	  low_new_fork (parent_proc, child_proc);
	}

      /* Save fork/clone info in the parent thread.  */
      if (event == PTRACE_EVENT_FORK)
	event_lwp->waitstatus.set_forked (child_ptid);
      else if (event == PTRACE_EVENT_VFORK)
	event_lwp->waitstatus.set_vforked (child_ptid);
      else if (event == PTRACE_EVENT_CLONE
	       && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
	event_lwp->waitstatus.set_thread_cloned (child_ptid);

      if (event != PTRACE_EVENT_CLONE
	  || (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
	{
	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled, the
	     handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent's event is passed on to
	     GDB.  */
	  event_lwp->relative = child_lwp;
	  child_lwp->relative = event_lwp;
	}

      /* If the parent thread is doing step-over with single-step
	 breakpoints, the list of single-step breakpoints are cloned
	 from the parent's.  Remove them from the child process.
	 In case of vfork, we'll reinsert them back once vforked
	 child is done.  */
      if (event_lwp->bp_reinsert != 0
	  && supports_software_single_step ())
	{
	  /* The child process is forked and stopped, so it is safe
	     to access its memory without stopping all other threads
	     from other processes.  */
	  delete_single_step_breakpoints (child_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	  gdb_assert (!has_single_step_breakpoints (child_thr));
	}

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  child_lwp->stop_expected = 1;
	  child_lwp->status_pending_p = 1;
	  child_lwp->status_pending = status;
	}
      else if (event == PTRACE_EVENT_CLONE && cs.report_thread_events)
	{
	  child_lwp->waitstatus.set_thread_created ();
	  child_lwp->status_pending_p = 1;
	  child_lwp->status_pending = status;
	}

      if (event == PTRACE_EVENT_CLONE)
	{
#ifdef USE_THREAD_DB
	  thread_db_notice_clone (event_thr, child_ptid);
#endif
	}

      if (event == PTRACE_EVENT_CLONE
	  && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) == 0)
	{
	  threads_debug_printf
	    ("not reporting clone event from LWP %ld, new child is %ld\n",
	     ptid_of (event_thr).lwp (),
	     new_pid);
	  return 1;
	}

      /* Leave the child stopped until GDB processes the parent
	 event.  */
      child_thr->last_resume_kind = resume_stop;
      child_thr->last_status.set_stopped (GDB_SIGNAL_0);

      /* Report the event.  */
      threads_debug_printf
	("reporting %s event from LWP %ld, new child is %ld\n",
	 (event == PTRACE_EVENT_FORK ? "fork"
	  : event == PTRACE_EVENT_VFORK ? "vfork"
	  : event == PTRACE_EVENT_CLONE ? "clone"
	  : "???"),
	 ptid_of (event_thr).lwp (),
	 new_pid);
      return 0;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.set_vfork_done ();

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
	{
	  /* Restore the single-step breakpoints removed when the
	     vfork child was spawned (see the vfork case above).  */
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      threads_debug_printf ("Got exec event from LWP %ld",
			    lwpid_of (event_thr));

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      switch_to_thread (nullptr);

      /* Create a new process/lwp/thread.  */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.set_execd
	(make_unique_xstrdup
	 (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.set_ignore ();

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (_("unknown ptrace event %d"), event);
}
df95181f
TBA
758CORE_ADDR
759linux_process_target::get_pc (lwp_info *lwp)
d50171e4 760{
a9deee17
PA
761 process_info *proc = get_thread_process (get_lwp_thread (lwp));
762 gdb_assert (!proc->starting_up);
d50171e4 763
bf9ae9d8 764 if (!low_supports_breakpoints ())
d50171e4
PA
765 return 0;
766
24583e45
TBA
767 scoped_restore_current_thread restore_thread;
768 switch_to_thread (get_lwp_thread (lwp));
d50171e4 769
a9deee17
PA
770 struct regcache *regcache = get_thread_regcache (current_thread, 1);
771 CORE_ADDR pc = low_get_pc (regcache);
d50171e4 772
c058728c 773 threads_debug_printf ("pc is 0x%lx", (long) pc);
d50171e4 774
d50171e4
PA
775 return pc;
776}
777
9eedd27d
TBA
778void
779linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
82075af2 780{
82075af2
JS
781 struct regcache *regcache;
782
24583e45
TBA
783 scoped_restore_current_thread restore_thread;
784 switch_to_thread (get_lwp_thread (lwp));
82075af2
JS
785
786 regcache = get_thread_regcache (current_thread, 1);
9eedd27d 787 low_get_syscall_trapinfo (regcache, sysno);
82075af2 788
c058728c 789 threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
82075af2
JS
790}
791
/* Read the syscall number from REGCACHE into *SYSNO.  Architectures
   that support syscall catchpoints override this.  */

void
linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* By default, report an unknown system call number.  */
  *sysno = UNKNOWN_SYSCALL;
}
/* Determine why LWP stopped (software/hardware breakpoint,
   watchpoint, or single-step), record it in LWP->stop_reason, adjust
   the PC past a software breakpoint if needed, and save the stop PC
   in LWP->stop_pc.  Return false only if the low target lacks the
   breakpoint/PC accessors.  */

bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  process_info *proc = get_thread_process (get_lwp_thread (lwp));
  if (proc->starting_up)
    {
      /* Claim we have the stop PC so that the caller doesn't try to
	 fetch it itself.  */
      return true;
    }

  pc = get_pc (lwp);
  /* Where the breakpoint instruction would be, accounting for the
     PC advancing past it on some architectures.  */
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      threads_debug_printf
	("%s stopped by software breakpoint",
	 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    threads_debug_printf
      ("%s stopped by hardware breakpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    threads_debug_printf
      ("%s stopped by hardware watchpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    threads_debug_printf
      ("%s stopped by trace",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

  lwp->stop_pc = pc;
  return true;
}
fd000fb3
TBA
918lwp_info *
919linux_process_target::add_lwp (ptid_t ptid)
611cb4a5 920{
c360a473 921 lwp_info *lwp = new lwp_info;
0d62e5e8 922
754e3168
AH
923 lwp->thread = add_thread (ptid, lwp);
924
fd000fb3 925 low_new_thread (lwp);
aa5ca48f 926
54a0b537 927 return lwp;
0d62e5e8 928}
611cb4a5 929
/* Hook for architectures to initialize per-LWP data for INFO.  */

void
linux_process_target::low_new_thread (lwp_info *info)
{
  /* Nop.  */
}
/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  Runs in the child
   after fork, before exec.  */

static void
linux_ptrace_fun ()
{
  /* Ask to be traced by our parent (gdbserver).  */
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  /* Put the inferior in its own process group, so signals sent to the
     inferior's group don't hit gdbserver.  */
  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      /* Order matters: close fd 0 first so open() reuses it for
	 /dev/null, then duplicate stderr onto stdout.  */
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
da6d8c04 968/* Start an inferior process and returns its pid.
2090129c
SDJ
969 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
970 are its arguments. */
da6d8c04 971
15295543
TBA
972int
973linux_process_target::create_inferior (const char *program,
974 const std::vector<char *> &program_args)
da6d8c04 975{
c12a5089 976 client_state &cs = get_client_state ();
a6dbe5df 977 struct lwp_info *new_lwp;
da6d8c04 978 int pid;
95954743 979 ptid_t ptid;
03583c20 980
41272101
TT
981 {
982 maybe_disable_address_space_randomization restore_personality
c12a5089 983 (cs.disable_randomization);
bea571eb 984 std::string str_program_args = construct_inferior_arguments (program_args);
41272101
TT
985
986 pid = fork_inferior (program,
987 str_program_args.c_str (),
988 get_environ ()->envp (), linux_ptrace_fun,
989 NULL, NULL, NULL, NULL);
990 }
03583c20 991
421490af
PA
992 /* When spawning a new process, we can't open the mem file yet. We
993 still have to nurse the process through the shell, and that execs
994 a couple times. The address space a /proc/PID/mem file is
995 accessing is destroyed on exec. */
996 process_info *proc = add_linux_process_no_mem_file (pid, 0);
95954743 997
184ea2f7 998 ptid = ptid_t (pid, pid);
95954743 999 new_lwp = add_lwp (ptid);
a6dbe5df 1000 new_lwp->must_set_ptrace_flags = 1;
611cb4a5 1001
2090129c
SDJ
1002 post_fork_inferior (pid, program);
1003
421490af
PA
1004 /* PROC is now past the shell running the program we want, so we can
1005 open the /proc/PID/mem file. */
1006 open_proc_mem_file (proc);
1007
a9fa9f7d 1008 return pid;
da6d8c04
DJ
1009}
1010
ece66d65
JS
1011/* Implement the post_create_inferior target_ops method. */
1012
6dee9afb
TBA
1013void
1014linux_process_target::post_create_inferior ()
ece66d65
JS
1015{
1016 struct lwp_info *lwp = get_thread_lwp (current_thread);
1017
797bcff5 1018 low_arch_setup ();
ece66d65
JS
1019
1020 if (lwp->must_set_ptrace_flags)
1021 {
1022 struct process_info *proc = current_process ();
1023 int options = linux_low_ptrace_options (proc->attached);
1024
1025 linux_enable_event_reporting (lwpid_of (current_thread), options);
1026 lwp->must_set_ptrace_flags = 0;
1027 }
1028}
1029
7ae1a6a6 1030int
fd000fb3 1031linux_process_target::attach_lwp (ptid_t ptid)
da6d8c04 1032{
54a0b537 1033 struct lwp_info *new_lwp;
e38504b3 1034 int lwpid = ptid.lwp ();
611cb4a5 1035
b8e1b30e 1036 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
56f7af9c 1037 != 0)
7ae1a6a6 1038 return errno;
24a09b5f 1039
b3312d80 1040 new_lwp = add_lwp (ptid);
0d62e5e8 1041
a6dbe5df
PA
1042 /* We need to wait for SIGSTOP before being able to make the next
1043 ptrace call on this LWP. */
1044 new_lwp->must_set_ptrace_flags = 1;
1045
644cebc9 1046 if (linux_proc_pid_is_stopped (lwpid))
c14d7ab2 1047 {
c058728c 1048 threads_debug_printf ("Attached to a stopped process");
c14d7ab2
PA
1049
1050 /* The process is definitely stopped. It is in a job control
1051 stop, unless the kernel predates the TASK_STOPPED /
1052 TASK_TRACED distinction, in which case it might be in a
1053 ptrace stop. Make sure it is in a ptrace stop; from there we
1054 can kill it, signal it, et cetera.
1055
1056 First make sure there is a pending SIGSTOP. Since we are
1057 already attached, the process can not transition from stopped
1058 to running without a PTRACE_CONT; so we know this signal will
1059 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1060 probably already in the queue (unless this kernel is old
1061 enough to use TASK_STOPPED for ptrace stops); but since
1062 SIGSTOP is not an RT signal, it can only be queued once. */
1063 kill_lwp (lwpid, SIGSTOP);
1064
1065 /* Finally, resume the stopped process. This will deliver the
1066 SIGSTOP (or a higher priority signal, just like normal
1067 PTRACE_ATTACH), which we'll catch later on. */
b8e1b30e 1068 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
c14d7ab2
PA
1069 }
1070
0d62e5e8 1071 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
0e21c1ec
DE
1072 brings it to a halt.
1073
1074 There are several cases to consider here:
1075
1076 1) gdbserver has already attached to the process and is being notified
1b3f6016 1077 of a new thread that is being created.
d50171e4
PA
1078 In this case we should ignore that SIGSTOP and resume the
1079 process. This is handled below by setting stop_expected = 1,
8336d594 1080 and the fact that add_thread sets last_resume_kind ==
d50171e4 1081 resume_continue.
0e21c1ec
DE
1082
1083 2) This is the first thread (the process thread), and we're attaching
1b3f6016
PA
1084 to it via attach_inferior.
1085 In this case we want the process thread to stop.
d50171e4
PA
1086 This is handled by having linux_attach set last_resume_kind ==
1087 resume_stop after we return.
e3deef73
LM
1088
1089 If the pid we are attaching to is also the tgid, we attach to and
1090 stop all the existing threads. Otherwise, we attach to pid and
1091 ignore any other threads in the same group as this pid.
0e21c1ec
DE
1092
1093 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1b3f6016
PA
1094 existing threads.
1095 In this case we want the thread to stop.
1096 FIXME: This case is currently not properly handled.
1097 We should wait for the SIGSTOP but don't. Things work apparently
1098 because enough time passes between when we ptrace (ATTACH) and when
1099 gdb makes the next ptrace call on the thread.
0d62e5e8
DJ
1100
1101 On the other hand, if we are currently trying to stop all threads, we
1102 should treat the new thread as if we had sent it a SIGSTOP. This works
54a0b537 1103 because we are guaranteed that the add_lwp call above added us to the
0e21c1ec
DE
1104 end of the list, and so the new thread has not yet reached
1105 wait_for_sigstop (but will). */
d50171e4 1106 new_lwp->stop_expected = 1;
0d62e5e8 1107
7ae1a6a6 1108 return 0;
95954743
PA
1109}
1110
8784d563
PA
1111/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1112 already attached. Returns true if a new LWP is found, false
1113 otherwise. */
1114
1115static int
1116attach_proc_task_lwp_callback (ptid_t ptid)
1117{
1118 /* Is this a new thread? */
1119 if (find_thread_ptid (ptid) == NULL)
1120 {
e38504b3 1121 int lwpid = ptid.lwp ();
8784d563
PA
1122 int err;
1123
c058728c 1124 threads_debug_printf ("Found new lwp %d", lwpid);
8784d563 1125
fd000fb3 1126 err = the_linux_target->attach_lwp (ptid);
8784d563
PA
1127
1128 /* Be quiet if we simply raced with the thread exiting. EPERM
1129 is returned if the thread's task still exists, and is marked
1130 as exited or zombie, as well as other conditions, so in that
1131 case, confirm the status in /proc/PID/status. */
1132 if (err == ESRCH
1133 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
c058728c
SM
1134 threads_debug_printf
1135 ("Cannot attach to lwp %d: thread is gone (%d: %s)",
1136 lwpid, err, safe_strerror (err));
8784d563
PA
1137 else if (err != 0)
1138 {
4d9b86e1 1139 std::string reason
50fa3001 1140 = linux_ptrace_attach_fail_reason_string (ptid, err);
4d9b86e1
SM
1141
1142 warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
8784d563
PA
1143 }
1144
1145 return 1;
1146 }
1147 return 0;
1148}
1149
500c1d85
PA
1150static void async_file_mark (void);
1151
e3deef73
LM
1152/* Attach to PID. If PID is the tgid, attach to it and all
1153 of its threads. */
1154
ef03dad8
TBA
1155int
1156linux_process_target::attach (unsigned long pid)
0d62e5e8 1157{
500c1d85
PA
1158 struct process_info *proc;
1159 struct thread_info *initial_thread;
184ea2f7 1160 ptid_t ptid = ptid_t (pid, pid);
7ae1a6a6
PA
1161 int err;
1162
421490af
PA
1163 /* Delay opening the /proc/PID/mem file until we've successfully
1164 attached. */
1165 proc = add_linux_process_no_mem_file (pid, 1);
df0da8a2 1166
e3deef73
LM
1167 /* Attach to PID. We will check for other threads
1168 soon. */
fd000fb3 1169 err = attach_lwp (ptid);
7ae1a6a6 1170 if (err != 0)
4d9b86e1 1171 {
f551c8ef 1172 this->remove_linux_process (proc);
4d9b86e1 1173
50fa3001
SDJ
1174 std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
1175 error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
4d9b86e1 1176 }
7ae1a6a6 1177
421490af
PA
1178 open_proc_mem_file (proc);
1179
500c1d85
PA
1180 /* Don't ignore the initial SIGSTOP if we just attached to this
1181 process. It will be collected by wait shortly. */
184ea2f7 1182 initial_thread = find_thread_ptid (ptid_t (pid, pid));
59487af3 1183 gdb_assert (initial_thread != nullptr);
500c1d85 1184 initial_thread->last_resume_kind = resume_stop;
0d62e5e8 1185
8784d563
PA
1186 /* We must attach to every LWP. If /proc is mounted, use that to
1187 find them now. On the one hand, the inferior may be using raw
1188 clone instead of using pthreads. On the other hand, even if it
1189 is using pthreads, GDB may not be connected yet (thread_db needs
1190 to do symbol lookups, through qSymbol). Also, thread_db walks
1191 structures in the inferior's address space to find the list of
1192 threads/LWPs, and those structures may well be corrupted. Note
1193 that once thread_db is loaded, we'll still use it to list threads
1194 and associate pthread info with each LWP. */
1195 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
500c1d85
PA
1196
1197 /* GDB will shortly read the xml target description for this
1198 process, to figure out the process' architecture. But the target
1199 description is only filled in when the first process/thread in
1200 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1201 that now, otherwise, if GDB is fast enough, it could read the
1202 target description _before_ that initial stop. */
1203 if (non_stop)
1204 {
1205 struct lwp_info *lwp;
1206 int wstat, lwpid;
f2907e49 1207 ptid_t pid_ptid = ptid_t (pid);
500c1d85 1208
d16f3f6c 1209 lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
500c1d85
PA
1210 gdb_assert (lwpid > 0);
1211
f2907e49 1212 lwp = find_lwp_pid (ptid_t (lwpid));
59487af3 1213 gdb_assert (lwp != nullptr);
500c1d85
PA
1214
1215 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1216 {
1217 lwp->status_pending_p = 1;
1218 lwp->status_pending = wstat;
1219 }
1220
1221 initial_thread->last_resume_kind = resume_continue;
1222
1223 async_file_mark ();
1224
1225 gdb_assert (proc->tdesc != NULL);
1226 }
1227
95954743
PA
1228 return 0;
1229}
1230
95954743 1231static int
e4eb0dec 1232last_thread_of_process_p (int pid)
95954743 1233{
e4eb0dec 1234 bool seen_one = false;
95954743 1235
da4ae14a 1236 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
95954743 1237 {
e4eb0dec
SM
1238 if (!seen_one)
1239 {
1240 /* This is the first thread of this process we see. */
1241 seen_one = true;
1242 return false;
1243 }
1244 else
1245 {
1246 /* This is the second thread of this process we see. */
1247 return true;
1248 }
1249 });
da6d8c04 1250
e4eb0dec 1251 return thread == NULL;
95954743
PA
1252}
1253
da84f473
PA
1254/* Kill LWP. */
1255
1256static void
1257linux_kill_one_lwp (struct lwp_info *lwp)
1258{
d86d4aaf
DE
1259 struct thread_info *thr = get_lwp_thread (lwp);
1260 int pid = lwpid_of (thr);
da84f473
PA
1261
1262 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1263 there is no signal context, and ptrace(PTRACE_KILL) (or
1264 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1265 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1266 alternative is to kill with SIGKILL. We only need one SIGKILL
1267 per process, not one for each thread. But since we still support
4a6ed09b
PA
1268 support debugging programs using raw clone without CLONE_THREAD,
1269 we send one for each thread. For years, we used PTRACE_KILL
1270 only, so we're being a bit paranoid about some old kernels where
1271 PTRACE_KILL might work better (dubious if there are any such, but
1272 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1273 second, and so we're fine everywhere. */
da84f473
PA
1274
1275 errno = 0;
69ff6be5 1276 kill_lwp (pid, SIGKILL);
da84f473 1277 if (debug_threads)
ce9e3fe7
PA
1278 {
1279 int save_errno = errno;
1280
c058728c
SM
1281 threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
1282 target_pid_to_str (ptid_of (thr)).c_str (),
1283 save_errno ? safe_strerror (save_errno) : "OK");
ce9e3fe7 1284 }
da84f473
PA
1285
1286 errno = 0;
b8e1b30e 1287 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
da84f473 1288 if (debug_threads)
ce9e3fe7
PA
1289 {
1290 int save_errno = errno;
1291
c058728c
SM
1292 threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
1293 target_pid_to_str (ptid_of (thr)).c_str (),
1294 save_errno ? safe_strerror (save_errno) : "OK");
ce9e3fe7 1295 }
da84f473
PA
1296}
1297
e76126e8
PA
1298/* Kill LWP and wait for it to die. */
1299
1300static void
1301kill_wait_lwp (struct lwp_info *lwp)
1302{
1303 struct thread_info *thr = get_lwp_thread (lwp);
e99b03dc 1304 int pid = ptid_of (thr).pid ();
e38504b3 1305 int lwpid = ptid_of (thr).lwp ();
e76126e8
PA
1306 int wstat;
1307 int res;
1308
c058728c 1309 threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);
e76126e8
PA
1310
1311 do
1312 {
1313 linux_kill_one_lwp (lwp);
1314
1315 /* Make sure it died. Notes:
1316
1317 - The loop is most likely unnecessary.
1318
d16f3f6c 1319 - We don't use wait_for_event as that could delete lwps
e76126e8
PA
1320 while we're iterating over them. We're not interested in
1321 any pending status at this point, only in making sure all
1322 wait status on the kernel side are collected until the
1323 process is reaped.
1324
1325 - We don't use __WALL here as the __WALL emulation relies on
1326 SIGCHLD, and killing a stopped process doesn't generate
1327 one, nor an exit status.
1328 */
1329 res = my_waitpid (lwpid, &wstat, 0);
1330 if (res == -1 && errno == ECHILD)
1331 res = my_waitpid (lwpid, &wstat, __WCLONE);
1332 } while (res > 0 && WIFSTOPPED (wstat));
1333
586b02a9
PA
1334 /* Even if it was stopped, the child may have already disappeared.
1335 E.g., if it was killed by SIGKILL. */
1336 if (res < 0 && errno != ECHILD)
1337 perror_with_name ("kill_wait_lwp");
e76126e8
PA
1338}
1339
578290ec 1340/* Callback for `for_each_thread'. Kills an lwp of a given process,
da84f473 1341 except the leader. */
95954743 1342
578290ec
SM
1343static void
1344kill_one_lwp_callback (thread_info *thread, int pid)
da6d8c04 1345{
54a0b537 1346 struct lwp_info *lwp = get_thread_lwp (thread);
0d62e5e8 1347
fd500816
DJ
1348 /* We avoid killing the first thread here, because of a Linux kernel (at
1349 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1350 the children get a chance to be reaped, it will remain a zombie
1351 forever. */
95954743 1352
d86d4aaf 1353 if (lwpid_of (thread) == pid)
95954743 1354 {
c058728c
SM
1355 threads_debug_printf ("is last of process %s",
1356 target_pid_to_str (thread->id).c_str ());
578290ec 1357 return;
95954743 1358 }
fd500816 1359
e76126e8 1360 kill_wait_lwp (lwp);
da6d8c04
DJ
1361}
1362
c6885a57
TBA
1363int
1364linux_process_target::kill (process_info *process)
0d62e5e8 1365{
a780ef4f 1366 int pid = process->pid;
9d606399 1367
f9e39928
PA
1368 /* If we're killing a running inferior, make sure it is stopped
1369 first, as PTRACE_KILL will not work otherwise. */
7984d532 1370 stop_all_lwps (0, NULL);
f9e39928 1371
578290ec
SM
1372 for_each_thread (pid, [&] (thread_info *thread)
1373 {
1374 kill_one_lwp_callback (thread, pid);
1375 });
fd500816 1376
54a0b537 1377 /* See the comment in linux_kill_one_lwp. We did not kill the first
fd500816 1378 thread in the list, so do so now. */
a780ef4f 1379 lwp_info *lwp = find_lwp_pid (ptid_t (pid));
bd99dc85 1380
784867a5 1381 if (lwp == NULL)
c058728c 1382 threads_debug_printf ("cannot find lwp for pid: %d", pid);
784867a5 1383 else
e76126e8 1384 kill_wait_lwp (lwp);
2d717e4f 1385
8adb37b9 1386 mourn (process);
f9e39928
PA
1387
1388 /* Since we presently can only stop all lwps of all processes, we
1389 need to unstop lwps of other processes. */
7984d532 1390 unstop_all_lwps (0, NULL);
95954743 1391 return 0;
0d62e5e8
DJ
1392}
1393
9b224c5e
PA
1394/* Get pending signal of THREAD, for detaching purposes. This is the
1395 signal the thread last stopped for, which we need to deliver to the
1396 thread when detaching, otherwise, it'd be suppressed/lost. */
1397
1398static int
1399get_detach_signal (struct thread_info *thread)
1400{
c12a5089 1401 client_state &cs = get_client_state ();
a493e3e2 1402 enum gdb_signal signo = GDB_SIGNAL_0;
9b224c5e
PA
1403 int status;
1404 struct lwp_info *lp = get_thread_lwp (thread);
1405
1406 if (lp->status_pending_p)
1407 status = lp->status_pending;
1408 else
1409 {
1410 /* If the thread had been suspended by gdbserver, and it stopped
1411 cleanly, then it'll have stopped with SIGSTOP. But we don't
1412 want to deliver that SIGSTOP. */
183be222
SM
1413 if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
1414 || thread->last_status.sig () == GDB_SIGNAL_0)
9b224c5e
PA
1415 return 0;
1416
1417 /* Otherwise, we may need to deliver the signal we
1418 intercepted. */
1419 status = lp->last_status;
1420 }
1421
1422 if (!WIFSTOPPED (status))
1423 {
c058728c
SM
1424 threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
1425 target_pid_to_str (ptid_of (thread)).c_str ());
9b224c5e
PA
1426 return 0;
1427 }
1428
1429 /* Extended wait statuses aren't real SIGTRAPs. */
89a5711c 1430 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
9b224c5e 1431 {
c058728c
SM
1432 threads_debug_printf ("lwp %s had stopped with extended "
1433 "status: no pending signal",
1434 target_pid_to_str (ptid_of (thread)).c_str ());
9b224c5e
PA
1435 return 0;
1436 }
1437
2ea28649 1438 signo = gdb_signal_from_host (WSTOPSIG (status));
9b224c5e 1439
c12a5089 1440 if (cs.program_signals_p && !cs.program_signals[signo])
9b224c5e 1441 {
c058728c
SM
1442 threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
1443 target_pid_to_str (ptid_of (thread)).c_str (),
1444 gdb_signal_to_string (signo));
9b224c5e
PA
1445 return 0;
1446 }
c12a5089 1447 else if (!cs.program_signals_p
9b224c5e
PA
1448 /* If we have no way to know which signals GDB does not
1449 want to have passed to the program, assume
1450 SIGTRAP/SIGINT, which is GDB's default. */
a493e3e2 1451 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
9b224c5e 1452 {
c058728c
SM
1453 threads_debug_printf ("lwp %s had signal %s, "
1454 "but we don't know if we should pass it. "
1455 "Default to not.",
1456 target_pid_to_str (ptid_of (thread)).c_str (),
1457 gdb_signal_to_string (signo));
9b224c5e
PA
1458 return 0;
1459 }
1460 else
1461 {
c058728c
SM
1462 threads_debug_printf ("lwp %s has pending signal %s: delivering it",
1463 target_pid_to_str (ptid_of (thread)).c_str (),
1464 gdb_signal_to_string (signo));
9b224c5e
PA
1465
1466 return WSTOPSIG (status);
1467 }
1468}
1469
fd000fb3
TBA
1470void
1471linux_process_target::detach_one_lwp (lwp_info *lwp)
6ad8ae5c 1472{
ced2dffb 1473 struct thread_info *thread = get_lwp_thread (lwp);
9b224c5e 1474 int sig;
ced2dffb 1475 int lwpid;
6ad8ae5c 1476
9b224c5e 1477 /* If there is a pending SIGSTOP, get rid of it. */
54a0b537 1478 if (lwp->stop_expected)
ae13219e 1479 {
c058728c
SM
1480 threads_debug_printf ("Sending SIGCONT to %s",
1481 target_pid_to_str (ptid_of (thread)).c_str ());
9b224c5e 1482
d86d4aaf 1483 kill_lwp (lwpid_of (thread), SIGCONT);
54a0b537 1484 lwp->stop_expected = 0;
ae13219e
DJ
1485 }
1486
9b224c5e
PA
1487 /* Pass on any pending signal for this thread. */
1488 sig = get_detach_signal (thread);
1489
ced2dffb
PA
1490 /* Preparing to resume may try to write registers, and fail if the
1491 lwp is zombie. If that happens, ignore the error. We'll handle
1492 it below, when detach fails with ESRCH. */
a70b8144 1493 try
ced2dffb
PA
1494 {
1495 /* Flush any pending changes to the process's registers. */
1496 regcache_invalidate_thread (thread);
1497
1498 /* Finally, let it resume. */
d7599cc0 1499 low_prepare_to_resume (lwp);
ced2dffb 1500 }
230d2906 1501 catch (const gdb_exception_error &ex)
ced2dffb
PA
1502 {
1503 if (!check_ptrace_stopped_lwp_gone (lwp))
eedc3f4f 1504 throw;
ced2dffb 1505 }
ced2dffb
PA
1506
1507 lwpid = lwpid_of (thread);
1508 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
b8e1b30e 1509 (PTRACE_TYPE_ARG4) (long) sig) < 0)
ced2dffb
PA
1510 {
1511 int save_errno = errno;
1512
1513 /* We know the thread exists, so ESRCH must mean the lwp is
1514 zombie. This can happen if one of the already-detached
1515 threads exits the whole thread group. In that case we're
1516 still attached, and must reap the lwp. */
1517 if (save_errno == ESRCH)
1518 {
1519 int ret, status;
1520
1521 ret = my_waitpid (lwpid, &status, __WALL);
1522 if (ret == -1)
1523 {
1524 warning (_("Couldn't reap LWP %d while detaching: %s"),
6d91ce9a 1525 lwpid, safe_strerror (errno));
ced2dffb
PA
1526 }
1527 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1528 {
1529 warning (_("Reaping LWP %d while detaching "
1530 "returned unexpected status 0x%x"),
1531 lwpid, status);
1532 }
1533 }
1534 else
1535 {
1536 error (_("Can't detach %s: %s"),
61d7f128 1537 target_pid_to_str (ptid_of (thread)).c_str (),
6d91ce9a 1538 safe_strerror (save_errno));
ced2dffb
PA
1539 }
1540 }
c058728c
SM
1541 else
1542 threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
1543 target_pid_to_str (ptid_of (thread)).c_str (),
1544 strsignal (sig));
bd99dc85
PA
1545
1546 delete_lwp (lwp);
ced2dffb
PA
1547}
1548
9061c9cf
TBA
1549int
1550linux_process_target::detach (process_info *process)
95954743 1551{
ced2dffb 1552 struct lwp_info *main_lwp;
95954743 1553
863d01bd
PA
1554 /* As there's a step over already in progress, let it finish first,
1555 otherwise nesting a stabilize_threads operation on top gets real
1556 messy. */
1557 complete_ongoing_step_over ();
1558
f9e39928 1559 /* Stop all threads before detaching. First, ptrace requires that
30baf67b 1560 the thread is stopped to successfully detach. Second, thread_db
f9e39928
PA
1561 may need to uninstall thread event breakpoints from memory, which
1562 only works with a stopped process anyway. */
7984d532 1563 stop_all_lwps (0, NULL);
f9e39928 1564
ca5c370d 1565#ifdef USE_THREAD_DB
8336d594 1566 thread_db_detach (process);
ca5c370d
PA
1567#endif
1568
fa593d66 1569 /* Stabilize threads (move out of jump pads). */
5c9eb2f2 1570 target_stabilize_threads ();
fa593d66 1571
ced2dffb
PA
1572 /* Detach from the clone lwps first. If the thread group exits just
1573 while we're detaching, we must reap the clone lwps before we're
1574 able to reap the leader. */
fd000fb3
TBA
1575 for_each_thread (process->pid, [this] (thread_info *thread)
1576 {
1577 /* We don't actually detach from the thread group leader just yet.
1578 If the thread group exits, we must reap the zombie clone lwps
1579 before we're able to reap the leader. */
1580 if (thread->id.pid () == thread->id.lwp ())
1581 return;
1582
1583 lwp_info *lwp = get_thread_lwp (thread);
1584 detach_one_lwp (lwp);
1585 });
ced2dffb 1586
ef2ddb33 1587 main_lwp = find_lwp_pid (ptid_t (process->pid));
59487af3 1588 gdb_assert (main_lwp != nullptr);
fd000fb3 1589 detach_one_lwp (main_lwp);
8336d594 1590
8adb37b9 1591 mourn (process);
f9e39928
PA
1592
1593 /* Since we presently can only stop all lwps of all processes, we
1594 need to unstop lwps of other processes. */
7984d532 1595 unstop_all_lwps (0, NULL);
f9e39928
PA
1596 return 0;
1597}
1598
1599/* Remove all LWPs that belong to process PROC from the lwp list. */
1600
8adb37b9
TBA
1601void
1602linux_process_target::mourn (process_info *process)
8336d594 1603{
8336d594
PA
1604#ifdef USE_THREAD_DB
1605 thread_db_mourn (process);
1606#endif
1607
fd000fb3 1608 for_each_thread (process->pid, [this] (thread_info *thread)
6b2a85da
SM
1609 {
1610 delete_lwp (get_thread_lwp (thread));
1611 });
f9e39928 1612
f551c8ef 1613 this->remove_linux_process (process);
8336d594
PA
1614}
1615
95a49a39
TBA
1616void
1617linux_process_target::join (int pid)
444d6139 1618{
444d6139
PA
1619 int status, ret;
1620
1621 do {
d105de22 1622 ret = my_waitpid (pid, &status, 0);
444d6139
PA
1623 if (WIFEXITED (status) || WIFSIGNALED (status))
1624 break;
1625 } while (ret != -1 || errno != ECHILD);
1626}
1627
13d3d99b
TBA
1628/* Return true if the given thread is still alive. */
1629
1630bool
1631linux_process_target::thread_alive (ptid_t ptid)
0d62e5e8 1632{
95954743
PA
1633 struct lwp_info *lwp = find_lwp_pid (ptid);
1634
1635 /* We assume we always know if a thread exits. If a whole process
1636 exited but we still haven't been able to report it to GDB, we'll
1637 hold on to the last lwp of the dead process. */
1638 if (lwp != NULL)
00db26fa 1639 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1640 else
1641 return 0;
1642}
1643
df95181f
TBA
1644bool
1645linux_process_target::thread_still_has_status_pending (thread_info *thread)
582511be
PA
1646{
1647 struct lwp_info *lp = get_thread_lwp (thread);
1648
1649 if (!lp->status_pending_p)
1650 return 0;
1651
582511be 1652 if (thread->last_resume_kind != resume_stop
15c66dd6
PA
1653 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1654 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
582511be 1655 {
582511be
PA
1656 CORE_ADDR pc;
1657 int discard = 0;
1658
1659 gdb_assert (lp->last_status != 0);
1660
1661 pc = get_pc (lp);
1662
24583e45
TBA
1663 scoped_restore_current_thread restore_thread;
1664 switch_to_thread (thread);
582511be
PA
1665
1666 if (pc != lp->stop_pc)
1667 {
c058728c
SM
1668 threads_debug_printf ("PC of %ld changed",
1669 lwpid_of (thread));
582511be
PA
1670 discard = 1;
1671 }
3e572f71
PA
1672
1673#if !USE_SIGTRAP_SIGINFO
15c66dd6 1674 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
d7146cda 1675 && !low_breakpoint_at (pc))
582511be 1676 {
c058728c
SM
1677 threads_debug_printf ("previous SW breakpoint of %ld gone",
1678 lwpid_of (thread));
582511be
PA
1679 discard = 1;
1680 }
15c66dd6 1681 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
582511be
PA
1682 && !hardware_breakpoint_inserted_here (pc))
1683 {
c058728c
SM
1684 threads_debug_printf ("previous HW breakpoint of %ld gone",
1685 lwpid_of (thread));
582511be
PA
1686 discard = 1;
1687 }
3e572f71 1688#endif
582511be 1689
582511be
PA
1690 if (discard)
1691 {
c058728c 1692 threads_debug_printf ("discarding pending breakpoint status");
582511be
PA
1693 lp->status_pending_p = 0;
1694 return 0;
1695 }
1696 }
1697
1698 return 1;
1699}
1700
a681f9c9
PA
1701/* Returns true if LWP is resumed from the client's perspective. */
1702
1703static int
1704lwp_resumed (struct lwp_info *lwp)
1705{
1706 struct thread_info *thread = get_lwp_thread (lwp);
1707
1708 if (thread->last_resume_kind != resume_stop)
1709 return 1;
1710
1711 /* Did gdb send us a `vCont;t', but we haven't reported the
1712 corresponding stop to gdb yet? If so, the thread is still
1713 resumed/running from gdb's perspective. */
1714 if (thread->last_resume_kind == resume_stop
183be222 1715 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
a681f9c9
PA
1716 return 1;
1717
1718 return 0;
1719}
1720
df95181f
TBA
1721bool
1722linux_process_target::status_pending_p_callback (thread_info *thread,
1723 ptid_t ptid)
0d62e5e8 1724{
582511be 1725 struct lwp_info *lp = get_thread_lwp (thread);
95954743
PA
1726
1727 /* Check if we're only interested in events from a specific process
afa8d396 1728 or a specific LWP. */
83e1b6c1 1729 if (!thread->id.matches (ptid))
95954743 1730 return 0;
0d62e5e8 1731
a681f9c9
PA
1732 if (!lwp_resumed (lp))
1733 return 0;
1734
582511be 1735 if (lp->status_pending_p
df95181f 1736 && !thread_still_has_status_pending (thread))
582511be 1737 {
df95181f 1738 resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
582511be
PA
1739 return 0;
1740 }
0d62e5e8 1741
582511be 1742 return lp->status_pending_p;
0d62e5e8
DJ
1743}
1744
95954743
PA
1745struct lwp_info *
1746find_lwp_pid (ptid_t ptid)
1747{
d4895ba2
SM
1748 long lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1749 thread_info *thread = find_thread ([lwp] (thread_info *thr_arg)
454296a2 1750 {
da4ae14a 1751 return thr_arg->id.lwp () == lwp;
454296a2 1752 });
d86d4aaf
DE
1753
1754 if (thread == NULL)
1755 return NULL;
1756
9c80ecd6 1757 return get_thread_lwp (thread);
95954743
PA
1758}
1759
fa96cb38 1760/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1761
fa96cb38
PA
1762static int
1763num_lwps (int pid)
1764{
fa96cb38 1765 int count = 0;
0d62e5e8 1766
4d3bb80e
SM
1767 for_each_thread (pid, [&] (thread_info *thread)
1768 {
9c80ecd6 1769 count++;
4d3bb80e 1770 });
3aee8918 1771
fa96cb38
PA
1772 return count;
1773}
d61ddec4 1774
6d4ee8c6
GB
1775/* See nat/linux-nat.h. */
1776
1777struct lwp_info *
1778iterate_over_lwps (ptid_t filter,
d3a70e03 1779 gdb::function_view<iterate_over_lwps_ftype> callback)
6d4ee8c6 1780{
da4ae14a 1781 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
6d1e5673 1782 {
da4ae14a 1783 lwp_info *lwp = get_thread_lwp (thr_arg);
6d1e5673 1784
d3a70e03 1785 return callback (lwp);
6d1e5673 1786 });
6d4ee8c6 1787
9c80ecd6 1788 if (thread == NULL)
6d4ee8c6
GB
1789 return NULL;
1790
9c80ecd6 1791 return get_thread_lwp (thread);
6d4ee8c6
GB
1792}
1793
fd000fb3
TBA
1794void
1795linux_process_target::check_zombie_leaders ()
fa96cb38 1796{
aa40a989
PA
1797 for_each_process ([this] (process_info *proc)
1798 {
1799 pid_t leader_pid = pid_of (proc);
1800 lwp_info *leader_lp = find_lwp_pid (ptid_t (leader_pid));
1801
1802 threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1803 "num_lwps=%d, zombie=%d",
1804 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1805 linux_proc_pid_is_zombie (leader_pid));
1806
1807 if (leader_lp != NULL && !leader_lp->stopped
1808 /* Check if there are other threads in the group, as we may
8a841a35
PA
1809 have raced with the inferior simply exiting. Note this
1810 isn't a watertight check. If the inferior is
1811 multi-threaded and is exiting, it may be we see the
1812 leader as zombie before we reap all the non-leader
1813 threads. See comments below. */
aa40a989
PA
1814 && !last_thread_of_process_p (leader_pid)
1815 && linux_proc_pid_is_zombie (leader_pid))
1816 {
8a841a35
PA
1817 /* A zombie leader in a multi-threaded program can mean one
1818 of three things:
1819
1820 #1 - Only the leader exited, not the whole program, e.g.,
1821 with pthread_exit. Since we can't reap the leader's exit
1822 status until all other threads are gone and reaped too,
1823 we want to delete the zombie leader right away, as it
1824 can't be debugged, we can't read its registers, etc.
1825 This is the main reason we check for zombie leaders
1826 disappearing.
1827
1828 #2 - The whole thread-group/process exited (a group exit,
1829 via e.g. exit(3), and there is (or will be shortly) an
1830 exit reported for each thread in the process, and then
1831 finally an exit for the leader once the non-leaders are
1832 reaped.
1833
1834 #3 - There are 3 or more threads in the group, and a
1835 thread other than the leader exec'd. See comments on
1836 exec events at the top of the file.
1837
1838 Ideally we would never delete the leader for case #2.
1839 Instead, we want to collect the exit status of each
1840 non-leader thread, and then finally collect the exit
1841 status of the leader as normal and use its exit code as
1842 whole-process exit code. Unfortunately, there's no
1843 race-free way to distinguish cases #1 and #2. We can't
1844 assume the exit events for the non-leaders threads are
1845 already pending in the kernel, nor can we assume the
1846 non-leader threads are in zombie state already. Between
1847 the leader becoming zombie and the non-leaders exiting
1848 and becoming zombie themselves, there's a small time
1849 window, so such a check would be racy. Temporarily
1850 pausing all threads and checking to see if all threads
1851 exit or not before re-resuming them would work in the
1852 case that all threads are running right now, but it
1853 wouldn't work if some thread is currently already
1854 ptrace-stopped, e.g., due to scheduler-locking.
1855
1856 So what we do is we delete the leader anyhow, and then
1857 later on when we see its exit status, we re-add it back.
1858 We also make sure that we only report a whole-process
1859 exit when we see the leader exiting, as opposed to when
1860 the last LWP in the LWP list exits, which can be a
1861 non-leader if we deleted the leader here. */
aa40a989 1862 threads_debug_printf ("Thread group leader %d zombie "
8a841a35
PA
1863 "(it exited, or another thread execd), "
1864 "deleting it.",
aa40a989 1865 leader_pid);
aa40a989
PA
1866 delete_lwp (leader_lp);
1867 }
9179355e 1868 });
fa96cb38 1869}
c3adc08c 1870
a1385b7b
SM
1871/* Callback for `find_thread'. Returns the first LWP that is not
1872 stopped. */
d50171e4 1873
a1385b7b
SM
1874static bool
1875not_stopped_callback (thread_info *thread, ptid_t filter)
fa96cb38 1876{
a1385b7b
SM
1877 if (!thread->id.matches (filter))
1878 return false;
47c0c975 1879
a1385b7b 1880 lwp_info *lwp = get_thread_lwp (thread);
fa96cb38 1881
a1385b7b 1882 return !lwp->stopped;
0d62e5e8 1883}
611cb4a5 1884
863d01bd
PA
1885/* Increment LWP's suspend count. */
1886
1887static void
1888lwp_suspended_inc (struct lwp_info *lwp)
1889{
1890 lwp->suspended++;
1891
c058728c
SM
1892 if (lwp->suspended > 4)
1893 threads_debug_printf
1894 ("LWP %ld has a suspiciously high suspend count, suspended=%d",
1895 lwpid_of (get_lwp_thread (lwp)), lwp->suspended);
863d01bd
PA
1896}
1897
1898/* Decrement LWP's suspend count. */
1899
1900static void
1901lwp_suspended_decr (struct lwp_info *lwp)
1902{
1903 lwp->suspended--;
1904
1905 if (lwp->suspended < 0)
1906 {
1907 struct thread_info *thread = get_lwp_thread (lwp);
1908
f34652de 1909 internal_error ("unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
863d01bd
PA
1910 lwp->suspended);
1911 }
1912}
1913
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  /* Balance the lwp_suspended_inc above; the count must be back to
     zero before returning.  */
  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads
	      || (lwp->collecting_fast_tracepoint
		  != fast_tpoint_collect_result::not_collecting));

  if (tpoint_related_event)
    {
      threads_debug_printf ("got a tracepoint event");
      return 1;
    }

  return 0;
}
1962
/* Check whether LWP is stopped somewhere inside a fast tracepoint
   jump pad, filling in *STATUS with the collection state.  Returns
   not_collecting if the thread area cannot be determined.  */

fast_tpoint_collect_result
linux_process_target::linux_fast_tracepoint_collecting
  (lwp_info *lwp, fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
    return fast_tpoint_collect_result::not_collecting;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}
1979
/* Default implementation: thread area lookup is not supported.
   Returns -1 unconditionally; architecture-specific targets that
   support fast tracepoints presumably override this — confirm in
   the per-arch linux-*-low files.  */

int
linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  return -1;
}
1985
/* Check whether LWP, stopped with wait status *WSTAT (or NULL if no
   status is available), is midway through a fast tracepoint collect
   in the jump pad.  Returns true if the LWP must keep going so it
   can move out of the jump pad before the event is reported; false
   if the event can be reported now.  May rewrite the LWP's PC and
   siginfo when a synchronous signal hit inside the relocated
   instruction.  */

bool
linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
{
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  /* Only relevant for non-SIGTRAP stops (or no status at all), and
     only when fast tracepoints are in use with the agent loaded.  */
  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;

      threads_debug_printf
	("Checking whether LWP %ld needs to move out of the jump pad.",
	 lwpid_of (current_thread));

      fast_tpoint_collect_result r
	= linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != fast_tpoint_collect_result::not_collecting)
	    {
	      if (r == fast_tpoint_collect_result::before_insn
		  && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      threads_debug_printf
		("Checking whether LWP %ld needs to move out of the jump pad..."
		 " it does", lwpid_of (current_thread));

	      return true;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint
	    = fast_tpoint_collect_result::not_collecting;

	  if (r != fast_tpoint_collect_result::not_collecting
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread, 1);
	      low_set_pc (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      threads_debug_printf
		("Cancelling fast exit-jump-pad: removing bkpt."
		 "stopping all threads momentarily.");

	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  threads_debug_printf
    ("Checking whether LWP %ld needs to move out of the jump pad... no",
     lwpid_of (current_thread));

  return false;
}
2102
/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  *WSTAT is the stop status carrying the signal;
   its full siginfo is captured via PTRACE_GETSIGINFO so it can be
   re-injected verbatim later.  Non-RT signals already queued are not
   queued a second time.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  threads_debug_printf ("Deferring signal %d for LWP %ld.",
			WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      for (const auto &sig : lwp->pending_signals_to_report)
	threads_debug_printf ("   Already queued %d", sig.signal);

      threads_debug_printf ("   (no more currently queued signals)");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      for (const auto &sig : lwp->pending_signals_to_report)
	{
	  if (sig.signal == WSTOPSIG (*wstat))
	    {
	      threads_debug_printf
		("Not requeuing already queued non-RT signal %d for LWP %ld",
		 sig.signal, lwpid_of (thread));
	      return;
	    }
	}
    }

  lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));

  /* Capture the signal's siginfo so it can be restored with
     PTRACE_SETSIGINFO when the signal is finally reported.  */
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &lwp->pending_signals_to_report.back ().info);
}
2144
/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  On success, stores the stop status in *WSTAT,
   restores the saved siginfo into the kernel, and returns 1.
   Returns 0 if the queue is empty.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (!lwp->pending_signals_to_report.empty ())
    {
      const pending_signal &p_sig = lwp->pending_signals_to_report.front ();

      *wstat = W_STOPCODE (p_sig.signal);
      /* si_signo == 0 means no siginfo was captured for this entry;
	 skip the SETSIGINFO in that case.  */
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals_to_report.pop_front ();

      threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
			    WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  for (const auto &sig : lwp->pending_signals_to_report)
	    threads_debug_printf ("   Still queued %d", sig.signal);

	  threads_debug_printf ("   (no more queued signals)");
	}

      return 1;
    }

  return 0;
}
2180
/* Check whether CHILD stopped because of a hardware watchpoint hit,
   recording the stop reason and faulting data address on the LWP if
   so.  Returns true if the stop was caused by a watchpoint.  */

bool
linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
{
  /* The low_stopped_by_watchpoint query operates on the current
     thread; temporarily switch to CHILD's thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (child));

  if (low_stopped_by_watchpoint ())
    {
      child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
      child->stopped_data_address = low_stopped_data_address ();
    }

  return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}
d50171e4 2195
/* Default implementation: no watchpoint support, never reports a
   watchpoint stop.  Architecture targets with hardware watchpoints
   presumably override this — confirm in the per-arch files.  */

bool
linux_process_target::low_stopped_by_watchpoint ()
{
  return false;
}
d50171e4 2201
/* Default implementation: no watchpoint support, so there is no
   faulting data address to report.  */

CORE_ADDR
linux_process_target::low_stopped_data_address ()
{
  return 0;
}
2207
/* Return the ptrace options that we want to try to enable.
   ATTACHED is non-zero if we attached to an already-running process
   (as opposed to having forked it ourselves).  */

static int
linux_low_ptrace_options (int attached)
{
  client_state &cs = get_client_state ();
  int options = 0;

  /* Only kill the inferior on gdbserver exit if we created it
     ourselves; an attached process should survive our exit.  */
  if (!attached)
    options |= PTRACE_O_EXITKILL;

  if (cs.report_fork_events)
    options |= PTRACE_O_TRACEFORK;

  if (cs.report_vfork_events)
    options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);

  if (cs.report_exec_events)
    options |= PTRACE_O_TRACEEXEC;

  /* Always enable, so syscall stops are distinguishable from normal
     SIGTRAPs (SYSCALL_SIGTRAP, i.e. SIGTRAP | 0x80).  */
  options |= PTRACE_O_TRACESYSGOOD;

  return options;
}
2232
/* Do low-level handling of one wait status WSTAT just pulled out of
   waitpid for LWP LWPID.  The event is recorded as pending on the
   LWP (status_pending) unless it is consumed here: delayed SIGSTOPs,
   extended events fully handled by handle_extended_wait, and exits
   of non-leader threads (when GDB is not interested in thread exits)
   are filtered out and not reported.  Events for LWPs not in our
   list may cause the LWP to be (re-)added first.  */

void
linux_process_target::filter_event (int lwpid, int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (ptid_t (lwpid));

  /* Check for events reported by anything not in our LWP list.  */
  if (child == nullptr)
    {
      if (WIFSTOPPED (wstat))
	{
	  if (WSTOPSIG (wstat) == SIGTRAP
	      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
	    {
	      /* A non-leader thread exec'ed after we've seen the
		 leader zombie, and removed it from our lists (in
		 check_zombie_leaders).  The non-leader thread changes
		 its tid to the tgid.  */
	      threads_debug_printf
		("Re-adding thread group leader LWP %d after exec.",
		 lwpid);

	      child = add_lwp (ptid_t (lwpid, lwpid));
	      child->stopped = 1;
	      switch_to_thread (child->thread);
	    }
	  else
	    {
	      /* A process we are controlling has forked and the new
		 child's stop was reported to us by the kernel.  Save
		 its PID and go back to waiting for the fork event to
		 be reported - the stopped process might be returned
		 from waitpid before or after the fork event is.  */
	      threads_debug_printf
		("Saving LWP %d status %s in stopped_pids list",
		 lwpid, status_to_str (wstat).c_str ());
	      add_to_pid_list (&stopped_pids, lwpid, wstat);
	    }
	}
      else
	{
	  /* Don't report an event for the exit of an LWP not in our
	     list, i.e. not part of any inferior we're debugging.
	     This can happen if we detach from a program we originally
	     forked and then it exits.  However, note that we may have
	     earlier deleted a leader of an inferior we're debugging,
	     in check_zombie_leaders.  Re-add it back here if so.  */
	  find_process ([&] (process_info *proc)
	    {
	      if (proc->pid == lwpid)
		{
		  threads_debug_printf
		    ("Re-adding thread group leader LWP %d after exit.",
		     lwpid);

		  child = add_lwp (ptid_t (lwpid, lwpid));
		  return true;
		}
	      return false;
	    });
	}

      /* Still unknown after the special cases above: ignore the
	 event entirely.  */
      if (child == nullptr)
	return;
    }

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      threads_debug_printf ("%d exited", lwpid);

      if (finish_step_over (child))
	{
	  /* Unsuspend all other LWPs, and set them back running again.  */
	  unsuspend_all_lwps (child);
	}

      /* If this is not the leader LWP, then the exit signal was not
	 the end of the debugged application and should be ignored,
	 unless GDB wants to hear about thread exits.  */
      if (cs.report_thread_events || is_leader (thread))
	{
	  /* Since events are serialized to GDB core, and we can't
	     report this one right now.  Leave the status pending for
	     the next time we're able to report it.  */
	  mark_lwp_dead (child, wstat);
	  return;
	}
      else
	{
	  delete_lwp (child);
	  return;
	}
    }

  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return;
	    }
	}
    }

  /* First stop after enabling extended events: set the ptrace
     options on the now-known LWP.  */
  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Always update syscall_state, even if it will be filtered later.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    {
      /* Syscall stops alternate between entry and return.  */
      child->syscall_state
	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	   ? TARGET_WAITKIND_SYSCALL_RETURN
	   : TARGET_WAITKIND_SYSCALL_ENTRY);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in handle_extended_wait.  */
      child->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Be careful to not overwrite stop_pc until save_stop_reason is
     called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (&child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return;
	}
    }

  if (linux_wstatus_maybe_breakpoint (wstat))
    {
      if (save_stop_reason (child))
	have_stop_pc = 1;
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      threads_debug_printf ("Expected stop.");

      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
				target_pid_to_str (ptid_of (thread)).c_str ());
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
				target_pid_to_str (ptid_of (thread)).c_str ());
	  return;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
				child->stepping ? "step" : "continue",
				target_pid_to_str (ptid_of (thread)).c_str ());

	  resume_one_lwp (child, child->stepping, 0, NULL);
	  return;
	}
    }

  /* Event not filtered out: leave it pending for the caller to pick
     up and report.  */
  child->status_pending_p = 1;
  child->status_pending = wstat;
  return;
}
2452
b31cdfa6
TBA
2453bool
2454linux_process_target::maybe_hw_step (thread_info *thread)
f79b145d 2455{
b31cdfa6
TBA
2456 if (supports_hardware_single_step ())
2457 return true;
f79b145d
YQ
2458 else
2459 {
3b9a79ef 2460 /* GDBserver must insert single-step breakpoint for software
f79b145d 2461 single step. */
3b9a79ef 2462 gdb_assert (has_single_step_breakpoints (thread));
b31cdfa6 2463 return false;
f79b145d
YQ
2464 }
2465}
2466
/* Resume THREAD's LWP if it is stopped, not suspended, has no
   pending status to report, and has not yet reported a wait status
   to the core (last_status is TARGET_WAITKIND_IGNORE).  Installs
   single-step breakpoints first when the thread is being stepped in
   software.  */

void
linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->stopped
      && !lp->suspended
      && !lp->status_pending_p
      && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
    {
      int step = 0;

      if (thread->last_resume_kind == resume_step)
	{
	  if (supports_software_single_step ())
	    install_software_single_step_breakpoints (lp);

	  /* Only request a hardware step when the target supports
	     it; otherwise resume and rely on the breakpoints.  */
	  step = maybe_hw_step (thread);
	}

      threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    paddress (lp->stop_pc), step);

      resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
    }
}
2494
/* Wait for an event from any LWP matching FILTER_PTID, considering
   only LWPs matching WAIT_PTID when deciding whether any resumed
   children remain.  On success, stores the wait status in *WSTATP,
   makes the event thread current, and returns its LWP id.  Returns
   -1 if no unwaited-for LWP remains, or 0 if OPTIONS contains
   WNOHANG and no event is available.  */

int
linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
					       ptid_t filter_ptid,
					       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
    {
      /* Wildcard or whole-process wait: pick a pending event at
	 random to avoid starving any particular LWP.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  threads_debug_printf ("Got a pending child %ld", lwpid_of (event_thread));
	}
    }
  else if (filter_ptid != null_ptid)
    {
      requested_child = find_lwp_pid (filter_ptid);
      gdb_assert (requested_child != nullptr);

      /* If the requested LWP is mid fast-tracepoint collect, defer
	 its pending signal and resume it so it can finish the
	 collect first.  */
      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && (requested_child->collecting_fast_tracepoint
	      != fast_tpoint_collect_result::not_collecting))
	{
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error ("requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      threads_debug_printf ("Got an event from pending child %ld (%04x)",
			    lwpid_of (event_thread),
			    event_child->status_pending);

      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      switch_to_thread (event_thread);
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
			    ret, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  threads_debug_printf ("waitpid %ld received %s",
				(long) ret, status_to_str (*wstatp).c_str ());

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_thread ([this] (thread_info *thread)
	  {
	    resume_stopped_resumed_lwps (thread);
	  });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      auto not_stopped = [&] (thread_info *thread)
	{
	  return not_stopped_callback (thread, wait_ptid);
	};

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if (find_thread (not_stopped) == NULL)
	{
	  threads_debug_printf ("exit (no unwaited-for LWP)");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  threads_debug_printf ("WNOHANG set, no event found");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      threads_debug_printf ("sigsuspend'ing");

      /* sigsuspend atomically restores PREV_MASK (unblocking
	 SIGCHLD) and waits; re-block everything again afterwards
	 before retrying the pending-status scan.  */
      sigsuspend (&prev_mask);
      gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);

  switch_to_thread (event_thread);

  return lwpid_of (event_thread);
}
2692
/* Convenience wrapper around wait_for_event_filtered, using PTID
   both as the wait set and as the event filter.  */

int
linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  return wait_for_event_filtered (ptid, ptid, wstatp, options);
}
2698
/* Select one LWP out of those that have events pending.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  struct thread_info *event_thread = NULL;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it's the LWP that
     the core is most interested in.  If we didn't do this, then we'd
     have to handle pending step SIGTRAPs somehow in case the core
     later continues the previously-stepped thread, otherwise we'd
     report the pending SIGTRAP, and the core, not having stepped the
     thread, wouldn't understand what the trap was for, and therefore
     would report it to the user as a random signal.  */
  if (!non_stop)
    {
      event_thread = find_thread ([] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
		  && thread->last_resume_kind == resume_step
		  && lp->status_pending_p);
	});

      if (event_thread != NULL)
	threads_debug_printf
	  ("Select single-step %s",
	   target_pid_to_str (ptid_of (event_thread)).c_str ());
    }
  if (event_thread == NULL)
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had events.  */

      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  /* Only resumed LWPs that have an event pending.  */
	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
		  && lp->status_pending_p);
	});
    }

  if (event_thread != NULL)
    {
      struct lwp_info *event_lp = get_thread_lwp (event_thread);

      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
2753
7984d532
PA
2754/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2755 NULL. */
2756
2757static void
2758unsuspend_all_lwps (struct lwp_info *except)
2759{
139720c5
SM
2760 for_each_thread ([&] (thread_info *thread)
2761 {
2762 lwp_info *lwp = get_thread_lwp (thread);
2763
2764 if (lwp != except)
2765 lwp_suspended_decr (lwp);
2766 });
7984d532
PA
2767}
2768
5a6b0a41 2769static bool lwp_running (thread_info *thread);
fa593d66
PA
2770
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

     - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

     - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

void
linux_process_target::stabilize_threads ()
{
  /* If any thread is stuck in a jump pad (e.g., blocked in a system
     call inside the pad), we cannot stabilize at all; bail out.  */
  thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
				{
				  return stuck_in_jump_pad (thread);
				});

  if (thread_stuck != NULL)
    {
      threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
			    lwpid_of (thread_stuck));
      return;
    }

  /* wait_1 below switches current_thread; restore it on exit.  */
  scoped_restore_current_thread restore_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread ([this] (thread_info *thread)
    {
      move_out_of_jump_pad (thread);
    });

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait event loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  Bump the suspend count so the LWP stays put
	     until unsuspend_all_lwps below.  */
	  lwp_suspended_inc (lwp);

	  /* Remember interesting signals so they can be re-reported
	     once stabilization is done.  */
	  if (ourstatus.sig () != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  /* Sanity check: with the loop above done, no thread should remain
     stuck in a jump pad.  Only diagnosed when debugging is on.  */
  if (debug_threads)
    {
      thread_stuck = find_thread ([this] (thread_info *thread)
		       {
			 return stuck_in_jump_pad (thread);
		       });

      if (thread_stuck != NULL)
	threads_debug_printf
	  ("couldn't stabilize, LWP %ld got stuck in jump pad",
	   lwpid_of (thread_stuck));
    }
}
2871
582511be
PA
2872/* Convenience function that is called when the kernel reports an
2873 event that is not passed out to GDB. */
2874
2875static ptid_t
2876ignore_event (struct target_waitstatus *ourstatus)
2877{
2878 /* If we got an event, there may still be others, as a single
2879 SIGCHLD can indicate more than one child stopped. This forces
2880 another target_wait call. */
2881 async_file_mark ();
2882
183be222 2883 ourstatus->set_ignore ();
582511be
PA
2884 return null_ptid;
2885}
2886
fd000fb3
TBA
2887ptid_t
2888linux_process_target::filter_exit_event (lwp_info *event_child,
2889 target_waitstatus *ourstatus)
65706a29 2890{
c12a5089 2891 client_state &cs = get_client_state ();
65706a29
PA
2892 struct thread_info *thread = get_lwp_thread (event_child);
2893 ptid_t ptid = ptid_of (thread);
2894
8a841a35 2895 if (!is_leader (thread))
65706a29 2896 {
c12a5089 2897 if (cs.report_thread_events)
183be222 2898 ourstatus->set_thread_exited (0);
65706a29 2899 else
183be222 2900 ourstatus->set_ignore ();
65706a29
PA
2901
2902 delete_lwp (event_child);
2903 }
2904 return ptid;
2905}
2906
82075af2
JS
2907/* Returns 1 if GDB is interested in any event_child syscalls. */
2908
2909static int
2910gdb_catching_syscalls_p (struct lwp_info *event_child)
2911{
2912 struct thread_info *thread = get_lwp_thread (event_child);
2913 struct process_info *proc = get_thread_process (thread);
2914
f27866ba 2915 return !proc->syscalls_to_catch.empty ();
82075af2
JS
2916}
2917
9eedd27d
TBA
2918bool
2919linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
82075af2 2920{
4cc32bec 2921 int sysno;
82075af2
JS
2922 struct thread_info *thread = get_lwp_thread (event_child);
2923 struct process_info *proc = get_thread_process (thread);
2924
f27866ba 2925 if (proc->syscalls_to_catch.empty ())
9eedd27d 2926 return false;
82075af2 2927
f27866ba 2928 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
9eedd27d 2929 return true;
82075af2 2930
4cc32bec 2931 get_syscall_trapinfo (event_child, &sysno);
f27866ba
SM
2932
2933 for (int iter : proc->syscalls_to_catch)
82075af2 2934 if (iter == sysno)
9eedd27d 2935 return true;
82075af2 2936
9eedd27d 2937 return false;
82075af2
JS
2938}
2939
d16f3f6c
TBA
/* Inner implementation of linux_process_target::wait.  Waits for one
   ptrace event out of PTID (or out of the thread being stepped over,
   if a step-over is pending), handles everything the core side does
   not need to see (internal breakpoints, tracepoint collection,
   ignored signals, step-over bookkeeping), and fills in OURSTATUS
   with an event to report, if any.  Returns the ptid of the LWP the
   reported event pertains to, or null_ptid when nothing is reported
   (OURSTATUS is then TARGET_WAITKIND_IGNORE or
   TARGET_WAITKIND_NO_RESUMED).  Of TARGET_OPTIONS, only
   TARGET_WNOHANG is honored here.  */

ptid_t
linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
			      target_wait_flags target_options)
{
  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  client_state &cs = get_client_state ();
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;
  int in_step_range;
  int any_resumed;

  threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

  bp_explains_trap = 0;
  trace_event = 0;
  in_step_range = 0;
  ourstatus->set_ignore ();

  auto status_pending_p_any = [&] (thread_info *thread)
    {
      return status_pending_p_callback (thread, minus_one_ptid);
    };

  auto not_stopped = [&] (thread_info *thread)
    {
      return not_stopped_callback (thread, minus_one_ptid);
    };

  /* Find a resumed LWP, if any.  Used below to decide whether a -1
     from waitpid means "no resumed children" or a real error.  */
  if (find_thread (status_pending_p_any) != NULL)
    any_resumed = 1;
  else if (find_thread (not_stopped) != NULL)
    any_resumed = 1;
  else
    any_resumed = 0;

  /* While a step-over is pending, only wait on the stepping thread,
     and do so blockingly -- the step must complete before anything
     else can be reported.  */
  if (step_over_bkpt == null_ptid)
    pid = wait_for_event (ptid, &w, options);
  else
    {
      threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
			    target_pid_to_str (step_over_bkpt).c_str ());
      pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0 || (pid == -1 && !any_resumed))
    {
      gdb_assert (target_options & TARGET_WNOHANG);

      threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");

      ourstatus->set_ignore ();
      return null_ptid;
    }
  else if (pid == -1)
    {
      threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");

      ourstatus->set_no_resumed ();
      return null_ptid;
    }

  event_child = get_thread_lwp (current_thread);

  /* wait_for_event only returns an exit status for the last
     child of a process.  Report it.  */
  if (WIFEXITED (w) || WIFSIGNALED (w))
    {
      if (WIFEXITED (w))
	{
	  ourstatus->set_exited (WEXITSTATUS (w));

	  threads_debug_printf
	    ("ret = %s, exited with retcode %d",
	     target_pid_to_str (ptid_of (current_thread)).c_str (),
	     WEXITSTATUS (w));
	}
      else
	{
	  ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));

	  threads_debug_printf
	    ("ret = %s, terminated with signal %d",
	     target_pid_to_str (ptid_of (current_thread)).c_str (),
	     WTERMSIG (w));
	}

      if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
	return filter_exit_event (event_child, ourstatus);

      return ptid_of (current_thread);
    }

  /* If step-over executes a breakpoint instruction, in the case of a
     hardware single step it means a gdb/gdbserver breakpoint had been
     planted on top of a permanent breakpoint, in the case of a software
     single step it may just mean that gdbserver hit the reinsert breakpoint.
     The PC has been adjusted by save_stop_reason to point at
     the breakpoint address.
     So in the case of the hardware single step advance the PC manually
     past the breakpoint and in the case of software single step advance only
     if it's not the single_step_breakpoint we are hitting.
     This avoids that a program would keep trapping a permanent breakpoint
     forever.  */
  if (step_over_bkpt != null_ptid
      && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && (event_child->stepping
	  || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
    {
      int increment_pc = 0;
      int breakpoint_kind = 0;
      CORE_ADDR stop_pc = event_child->stop_pc;

      breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
      sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);

      threads_debug_printf
	("step-over for %s executed software breakpoint",
	 target_pid_to_str (ptid_of (current_thread)).c_str ());

      if (increment_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);

	  event_child->stop_pc += increment_pc;
	  low_set_pc (regcache, event_child->stop_pc);

	  if (!low_breakpoint_at (event_child->stop_pc))
	    event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (low_supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && low_breakpoint_at (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any single-step
	 breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	threads_debug_printf ("Hit a gdbserver breakpoint.");
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* If we're collecting a fast tracepoint, finish the collection and
     move out of the jump pad before delivering a signal.  See
     linux_stabilize_threads.  */

  if (WIFSTOPPED (w)
      && WSTOPSIG (w) != SIGTRAP
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      threads_debug_printf ("Got signal %d for LWP %ld.  Check if we need "
			    "to defer or adjust it.",
			    WSTOPSIG (w), lwpid_of (current_thread));

      /* Allow debugging the jump pad itself.  */
      if (current_thread->last_resume_kind != resume_step
	  && maybe_move_out_of_jump_pad (event_child, &w))
	{
	  enqueue_one_deferred_signal (event_child, &w);

	  threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
				WSTOPSIG (w), lwpid_of (current_thread));

	  resume_one_lwp (event_child, 0, 0, NULL);

	  return ignore_event (ourstatus);
	}
    }

  if (event_child->collecting_fast_tracepoint
      != fast_tpoint_collect_result::not_collecting)
    {
      threads_debug_printf
	("LWP %ld was trying to move out of the jump pad (%d). "
	 "Check if we're already there.",
	 lwpid_of (current_thread),
	 (int) event_child->collecting_fast_tracepoint);

      trace_event = 1;

      event_child->collecting_fast_tracepoint
	= linux_fast_tracepoint_collecting (event_child, NULL);

      if (event_child->collecting_fast_tracepoint
	  != fast_tpoint_collect_result::before_insn)
	{
	  /* No longer need this breakpoint.  */
	  if (event_child->exit_jump_pad_bkpt != NULL)
	    {
	      threads_debug_printf
		("No longer need exit-jump-pad bkpt; removing it."
		 "stopping all threads momentarily.");

	      /* Other running threads could hit this breakpoint.
		 We don't handle moribund locations like GDB does,
		 instead we always pause all threads when removing
		 breakpoints, so that any step-over or
		 decr_pc_after_break adjustment is always taken
		 care of while the breakpoint is still
		 inserted.  */
	      stop_all_lwps (1, event_child);

	      delete_breakpoint (event_child->exit_jump_pad_bkpt);
	      event_child->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, event_child);

	      gdb_assert (event_child->suspended >= 0);
	    }
	}

      if (event_child->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting)
	{
	  threads_debug_printf
	    ("fast tracepoint finished collecting successfully.");

	  /* We may have a deferred signal to report.  */
	  if (dequeue_one_deferred_signal (event_child, &w))
	    threads_debug_printf ("dequeued one signal.");
	  else
	    {
	      threads_debug_printf ("no deferred signals.");

	      if (stabilizing_threads)
		{
		  ourstatus->set_stopped (GDB_SIGNAL_0);

		  threads_debug_printf
		    ("ret = %s, stopped while stabilizing threads",
		     target_pid_to_str (ptid_of (current_thread)).c_str ());

		  return ptid_of (current_thread);
		}
	    }
	}
    }

  /* Check whether GDB would be interested in this event.  */

  /* Check if GDB is interested in this syscall.  */
  if (WIFSTOPPED (w)
      && WSTOPSIG (w) == SYSCALL_SIGTRAP
      && !gdb_catch_this_syscall (event_child))
    {
      threads_debug_printf ("Ignored syscall for LWP %ld.",
			    lwpid_of (current_thread));

      resume_one_lwp (event_child, event_child->stepping, 0, NULL);

      return ignore_event (ourstatus);
    }

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler.  Also never ignore signals that could be caused by a
     breakpoint.  */
  if (WIFSTOPPED (w)
      && current_thread->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
	  (current_process ()->priv->thread_db != NULL
	   && (WSTOPSIG (w) == __SIGRTMIN
	       || WSTOPSIG (w) == __SIGRTMIN + 1))
	  ||
#endif
	  (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
	   && !(WSTOPSIG (w) == SIGSTOP
		&& current_thread->last_resume_kind == resume_stop)
	   && !linux_wstatus_maybe_breakpoint (w))))
    {
      siginfo_t info, *info_p;

      threads_debug_printf ("Ignored signal %d for LWP %ld.",
			    WSTOPSIG (w), lwpid_of (current_thread));

      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
		  (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      if (step_over_finished)
	{
	  /* We cancelled this thread's step-over above.  We still
	     need to unsuspend all other LWPs, and set them back
	     running again while the signal handler runs.  */
	  unsuspend_all_lwps (event_child);

	  /* Enqueue the pending signal info so that proceed_all_lwps
	     doesn't lose it.  */
	  enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);

	  proceed_all_lwps ();
	}
      else
	{
	  resume_one_lwp (event_child, event_child->stepping,
			  WSTOPSIG (w), info_p);
	}

      return ignore_event (ourstatus);
    }

  /* Note that all addresses are always "out of the step range" when
     there's no range to begin with.  */
  in_step_range = lwp_in_step_range (event_child);

  /* If GDB wanted this thread to single step, and the thread is out
     of the step range, we always want to report the SIGTRAP, and let
     GDB handle it.  Watchpoints should always be reported.  So should
     signals we can't explain.  A SIGTRAP we can't explain could be a
     GDB breakpoint --- we may or not support Z0 breakpoints.  If we
     do, we're be able to handle GDB breakpoints on top of internal
     breakpoints, by handling the internal breakpoint and still
     reporting the event to GDB.  If we don't, we're out of luck, GDB
     won't see the breakpoint hit.  If we see a single-step event but
     the thread should be continuing, don't pass the trap to gdb.
     That indicates that we had previously finished a single-step but
     left the single-step pending -- see
     complete_ongoing_step_over.  */
  report_to_gdb = (!maybe_internal_trap
		   || (current_thread->last_resume_kind == resume_step
		       && !in_step_range)
		   || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
		   || (!in_step_range
		       && !bp_explains_trap
		       && !trace_event
		       && !step_over_finished
		       && !(current_thread->last_resume_kind == resume_continue
			    && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
		   || (gdb_breakpoint_here (event_child->stop_pc)
		       && gdb_condition_true_at_breakpoint (event_child->stop_pc)
		       && gdb_no_commands_at_breakpoint (event_child->stop_pc))
		   || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);

  run_breakpoint_commands (event_child->stop_pc);

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (bp_explains_trap)
	threads_debug_printf ("Hit a gdbserver breakpoint.");

      if (step_over_finished)
	threads_debug_printf ("Step-over finished.");

      if (trace_event)
	threads_debug_printf ("Tracepoint event.");

      if (lwp_in_step_range (event_child))
	threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
			      paddress (event_child->stop_pc),
			      paddress (event_child->step_range_start),
			      paddress (event_child->step_range_end));

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (low_supports_breakpoints ())
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc);
	}

      if (step_over_finished)
	{
	  /* If we have finished stepping over a breakpoint, we've
	     stopped and suspended all LWPs momentarily except the
	     stepping one.  This is where we resume them all again.
	     We're going to keep waiting, so use proceed, which
	     handles stepping over the next breakpoint.  */
	  unsuspend_all_lwps (event_child);
	}
      else
	{
	  /* Remove the single-step breakpoints if any.  Note that
	     there isn't single-step breakpoint if we finished stepping
	     over.  */
	  if (supports_software_single_step ()
	      && has_single_step_breakpoints (current_thread))
	    {
	      stop_all_lwps (0, event_child);
	      delete_single_step_breakpoints (current_thread);
	      unstop_all_lwps (0, event_child);
	    }
	}

      threads_debug_printf ("proceeding all threads.");

      proceed_all_lwps ();

      return ignore_event (ourstatus);
    }

  if (debug_threads)
    {
      if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
	threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
			      lwpid_of (get_lwp_thread (event_child)),
			      event_child->waitstatus.to_string ().c_str ());

      if (current_thread->last_resume_kind == resume_step)
	{
	  if (event_child->step_range_start == event_child->step_range_end)
	    threads_debug_printf
	      ("GDB wanted to single-step, reporting event.");
	  else if (!lwp_in_step_range (event_child))
	    threads_debug_printf ("Out of step range, reporting event.");
	}

      if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
	threads_debug_printf ("Stopped by watchpoint.");
      else if (gdb_breakpoint_here (event_child->stop_pc))
	threads_debug_printf ("Stopped by GDB breakpoint.");
    }

  threads_debug_printf ("Hit a non-gdbserver trap event.");

  /* Alright, we're going to report a stop.  */

  /* Remove single-step breakpoints.  */
  if (supports_software_single_step ())
    {
      /* Remove single-step breakpoints or not.  If it is true, stop all
	 lwps, so that other threads won't hit the breakpoint in the
	 staled memory.  */
      int remove_single_step_breakpoints_p = 0;

      if (non_stop)
	{
	  remove_single_step_breakpoints_p
	    = has_single_step_breakpoints (current_thread);
	}
      else
	{
	  /* In all-stop, a stop reply cancels all previous resume
	     requests.  Delete all single-step breakpoints.  */

	  find_thread ([&] (thread_info *thread) {
	    if (has_single_step_breakpoints (thread))
	      {
		remove_single_step_breakpoints_p = 1;
		return true;
	      }

	    return false;
	  });
	}

      if (remove_single_step_breakpoints_p)
	{
	  /* If we remove single-step breakpoints from memory, stop all lwps,
	     so that other threads won't hit the breakpoint in the staled
	     memory.  */
	  stop_all_lwps (0, event_child);

	  if (non_stop)
	    {
	      gdb_assert (has_single_step_breakpoints (current_thread));
	      delete_single_step_breakpoints (current_thread);
	    }
	  else
	    {
	      for_each_thread ([] (thread_info *thread){
		if (has_single_step_breakpoints (thread))
		  delete_single_step_breakpoints (thread);
	      });
	    }

	  unstop_all_lwps (0, event_child);
	}
    }

  if (!stabilizing_threads)
    {
      /* In all-stop, stop all threads.  */
      if (!non_stop)
	stop_all_lwps (0, NULL);

      if (step_over_finished)
	{
	  if (!non_stop)
	    {
	      /* If we were doing a step-over, all other threads but
		 the stepping one had been paused in start_step_over,
		 with their suspend counts incremented.  We don't want
		 to do a full unstop/unpause, because we're in
		 all-stop mode (so we want threads stopped), but we
		 still need to unsuspend the other threads, to
		 decrement their `suspended' count back.  */
	      unsuspend_all_lwps (event_child);
	    }
	  else
	    {
	      /* If we just finished a step-over, then all threads had
		 been momentarily paused.  In all-stop, that's fine,
		 we want threads stopped by now anyway.  In non-stop,
		 we need to re-resume threads that GDB wanted to be
		 running.  */
	      unstop_all_lwps (1, event_child);
	    }
	}

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid == minus_one_ptid)
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  /* current_thread and event_child must stay in sync.  */
	  switch_to_thread (get_lwp_thread (event_child));

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}


      /* Stabilize threads (move out of jump pads).  */
      if (!non_stop)
	target_stabilize_threads ();
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (1, event_child);
    }

  /* At this point, we haven't set OURSTATUS.  This is where we do it.  */
  gdb_assert (ourstatus->kind () == TARGET_WAITKIND_IGNORE);

  if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    {
      /* If the reported event is an exit, fork, vfork, clone or exec,
	 let GDB know.  */

      /* Break the unreported fork/vfork/clone relationship chain.  */
      if (is_new_child_status (event_child->waitstatus.kind ()))
	{
	  event_child->relative->relative = NULL;
	  event_child->relative = NULL;
	}

      *ourstatus = event_child->waitstatus;
      /* Clear the event lwp's waitstatus since we handled it already.  */
      event_child->waitstatus.set_ignore ();
    }
  else
    {
      /* The LWP stopped due to a plain signal or a syscall signal.  Either way,
	 event_child->waitstatus wasn't filled in with the details, so look at
	 the wait status W.  */
      if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
	{
	  int syscall_number;

	  get_syscall_trapinfo (event_child, &syscall_number);
	  if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
	    ourstatus->set_syscall_entry (syscall_number);
	  else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
	    ourstatus->set_syscall_return (syscall_number);
	  else
	    gdb_assert_not_reached ("unexpected syscall state");
	}
      else if (current_thread->last_resume_kind == resume_stop
	       && WSTOPSIG (w) == SIGSTOP)
	{
	  /* A thread that has been requested to stop by GDB with vCont;t,
	     and it stopped cleanly, so report as SIG0.  The use of
	     SIGSTOP is an implementation detail.  */
	  ourstatus->set_stopped (GDB_SIGNAL_0);
	}
      else
	ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
    }

  /* Now that we've selected our final event LWP, un-adjust its PC if
     it was a software breakpoint, and the client doesn't know we can
     adjust the breakpoint ourselves.  */
  if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && !cs.swbreak_feature)
    {
      int decr_pc = low_decr_pc_after_break ();

      if (decr_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc + decr_pc);
	}
    }

  gdb_assert (step_over_bkpt == null_ptid);

  threads_debug_printf ("ret = %s, %s",
			target_pid_to_str (ptid_of (current_thread)).c_str (),
			ourstatus->to_string ().c_str ());

  if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
    return filter_exit_event (event_child, ourstatus);

  return ptid_of (current_thread);
}
3615
/* Get rid of any pending event in the pipe.  Drains the event pipe so
   stale wakeup tokens don't trigger spurious event-loop iterations.  */
static void
async_file_flush (void)
{
  linux_event_pipe.flush ();
}
3622
/* Put something in the pipe, so the event loop wakes up.  Writes a
   token into the event pipe; the event loop's read handler will then
   call target_wait again.  */
static void
async_file_mark (void)
{
  linux_event_pipe.mark ();
}
3629
6532e7e3
TBA
/* Wait for an event from any LWP matching PTID and store it in
   OURSTATUS.  Wraps wait_1: in synchronous mode (TARGET_WNOHANG
   clear) it loops until wait_1 produces a reportable event; in
   asynchronous mode it polls once and re-marks the event pipe if an
   event was found, since a single SIGCHLD can signal more than one
   child stop.  Returns the ptid of the event LWP.  */

ptid_t
linux_process_target::wait (ptid_t ptid,
			    target_waitstatus *ourstatus,
			    target_wait_flags target_options)
{
  ptid_t event_ptid;

  /* Flush the async file first.  */
  if (target_is_async_p ())
    async_file_flush ();

  do
    {
      event_ptid = wait_1 (ptid, ourstatus, target_options);
    }
  while ((target_options & TARGET_WNOHANG) == 0
	 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);

  /* If at least one stop was reported, there may be more.  A single
     SIGCHLD can signal more than one child stop.  */
  if (target_is_async_p ()
      && (target_options & TARGET_WNOHANG) != 0
      && event_ptid != null_ptid)
    async_file_mark ();

  return event_ptid;
}
3657
c5f62d5f 3658/* Send a signal to an LWP. */
fd500816
DJ
3659
3660static int
a1928bad 3661kill_lwp (unsigned long lwpid, int signo)
fd500816 3662{
4a6ed09b 3663 int ret;
fd500816 3664
4a6ed09b
PA
3665 errno = 0;
3666 ret = syscall (__NR_tkill, lwpid, signo);
3667 if (errno == ENOSYS)
3668 {
3669 /* If tkill fails, then we are not using nptl threads, a
3670 configuration we no longer support. */
3671 perror_with_name (("tkill"));
3672 }
3673 return ret;
fd500816
DJ
3674}
3675
964e4306
PA
/* Ask LWP to stop by sending it a SIGSTOP (see send_sigstop).
   Non-static entry point for code outside this file.  */
void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3681
/* Send a SIGSTOP to LWP, unless one is already pending
   (lwp->stop_expected), and record that a stop is now expected.  */
static void
send_sigstop (struct lwp_info *lwp)
{
  int pid;

  pid = lwpid_of (get_lwp_thread (lwp));

  /* If we already have a pending stop signal for this process, don't
     send another.  */
  if (lwp->stop_expected)
    {
      threads_debug_printf ("Have pending sigstop for lwp %d", pid);

      return;
    }

  threads_debug_printf ("Sending sigstop to lwp %d", pid);

  /* Set stop_expected before sending, so the SIGSTOP is accounted
     for when its stop event is later collected.  */
  lwp->stop_expected = 1;
  kill_lwp (pid, SIGSTOP);
}
3703
df3e4dbe
SM
3704static void
3705send_sigstop (thread_info *thread, lwp_info *except)
02fc4de7 3706{
d86d4aaf 3707 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3708
7984d532
PA
3709 /* Ignore EXCEPT. */
3710 if (lwp == except)
df3e4dbe 3711 return;
7984d532 3712
02fc4de7 3713 if (lwp->stopped)
df3e4dbe 3714 return;
02fc4de7
PA
3715
3716 send_sigstop (lwp);
7984d532
PA
3717}
3718
/* Increment the suspend count of an LWP, and stop it, if not stopped
   yet.  */
static void
suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* Ignore EXCEPT.  Note we return before bumping the suspend count,
     so EXCEPT is neither suspended nor stopped by this call.  */
  if (lwp == except)
    return;

  lwp_suspended_inc (lwp);

  send_sigstop (thread, except);
}
3734
95954743
PA
/* Record that LWP is dead: stash its wait status WSTAT as a pending
   event (both in raw form and as a target_waitstatus), and clear the
   stopped/stop-expected bookkeeping so no further stop is awaited.  */
static void
mark_lwp_dead (struct lwp_info *lwp, int wstat)
{
  /* Store the exit status for later.  */
  lwp->status_pending_p = 1;
  lwp->status_pending = wstat;

  /* Store in waitstatus as well, as there's nothing else to process
     for this event.  */
  if (WIFEXITED (wstat))
    lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));

  /* Prevent trying to stop it.  */
  lwp->stopped = 1;

  /* No further stops are expected from a dead lwp.  */
  lwp->stop_expected = 0;
}
3755
00db26fa
PA
3756/* Return true if LWP has exited already, and has a pending exit event
3757 to report to GDB. */
3758
3759static int
3760lwp_is_marked_dead (struct lwp_info *lwp)
3761{
3762 return (lwp->status_pending_p
3763 && (WIFEXITED (lwp->status_pending)
3764 || WIFSIGNALED (lwp->status_pending)));
3765}
3766
d16f3f6c
TBA
/* Pull events from all LWPs until there are no unwaited-for children
   left, leaving every event pending.  Used after stop requests have
   been sent, to collect the resulting stops.  If the previously
   current thread died in the process, deliberately leave no thread
   selected rather than silently switching.  */
void
linux_process_target::wait_for_sigstop ()
{
  struct thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  scoped_restore_current_thread restore_thread;

  threads_debug_printf ("pulling events");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
  gdb_assert (ret == -1);

  if (saved_thread == NULL || mythread_alive (saved_tid))
    return;
  else
    {
      threads_debug_printf ("Previously current thread died.");

      /* We can't change the current inferior behind GDB's back,
	 otherwise, a subsequent command may apply to the wrong
	 process.  */
      restore_thread.dont_restore ();
      switch_to_thread (nullptr);
    }
}
3804
13e567af
TBA
/* Return true if THREAD's (stopped) LWP is in the middle of a fast
   tracepoint collect and is at a spot that must be reported to GDB
   (a GDB breakpoint, a watchpoint stop, or a requested single-step),
   i.e. it cannot simply be moved out of the jump pad silently.  */
bool
linux_process_target::stuck_in_jump_pad (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  if (lwp->suspended != 0)
    {
      internal_error ("LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* Allow debugging the jump pad, gdb_collect, etc..  */
  return (supports_fast_tracepoints ()
	  && agent_loaded_p ()
	  && (gdb_breakpoint_here (lwp->stop_pc)
	      || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
	      || thread->last_resume_kind == resume_step)
	  && (linux_fast_tracepoint_collecting (lwp, NULL)
	      != fast_tpoint_collect_result::not_collecting));
}
3826
d16f3f6c
TBA
/* If THREAD's stopped LWP is inside a fast tracepoint jump pad and
   nothing requires reporting its current stop (no GDB breakpoint,
   no watchpoint stop, no single-step request), defer any pending
   signal and resume it so it can finish getting out of the pad;
   otherwise leave it where it is and bump its suspend count.  */
void
linux_process_target::move_out_of_jump_pad (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int *wstat;

  if (lwp->suspended != 0)
    {
      internal_error ("LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* For gdb_breakpoint_here.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
			    lwpid_of (thread));

      if (wstat)
	{
	  /* Requeue the pending stop as a deferred signal, to be
	     redelivered once stabilization is done.  */
	  lwp->status_pending_p = 0;
	  enqueue_one_deferred_signal (lwp, wstat);

	  threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad",
				WSTOPSIG (*wstat), lwpid_of (thread));
	}

      resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    lwp_suspended_inc (lwp);
}
3869
5a6b0a41
SM
3870static bool
3871lwp_running (thread_info *thread)
fa593d66 3872{
d86d4aaf 3873 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3874
00db26fa 3875 if (lwp_is_marked_dead (lwp))
5a6b0a41
SM
3876 return false;
3877
3878 return !lwp->stopped;
fa593d66
PA
3879}
3880
d16f3f6c
TBA
/* Stop all LWPs except EXCEPT by sending them SIGSTOPs, then wait
   until every stop has been collected.  If SUSPEND is nonzero, also
   increment each stopped LWP's suspend count.  Not reentrant: the
   stopping_threads flag guards against recursion.  */
void
linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
{
  /* Should not be called recursively.  */
  gdb_assert (stopping_threads == NOT_STOPPING_THREADS);

  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  threads_debug_printf
    ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
     (except != NULL
      ? target_pid_to_str (ptid_of (get_lwp_thread (except))).c_str ()
      : "none"));

  stopping_threads = (suspend
		      ? STOPPING_AND_SUSPENDING_THREADS
		      : STOPPING_THREADS);

  if (suspend)
    for_each_thread ([&] (thread_info *thread)
      {
	suspend_and_send_sigstop (thread, except);
      });
  else
    for_each_thread ([&] (thread_info *thread)
      {
	send_sigstop (thread, except);
      });

  wait_for_sigstop ();
  stopping_threads = NOT_STOPPING_THREADS;

  threads_debug_printf ("setting stopping_threads back to !stopping");
}
3915
863d01bd
PA
3916/* Enqueue one signal in the chain of signals which need to be
3917 delivered to this process on next resume. */
3918
3919static void
3920enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3921{
013e3554
TBA
3922 lwp->pending_signals.emplace_back (signal);
3923 if (info == nullptr)
3924 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
863d01bd 3925 else
013e3554 3926 lwp->pending_signals.back ().info = *info;
863d01bd
PA
3927}
3928
df95181f
TBA
/* Implement software single-stepping for LWP: compute the possible
   next PCs via the low target and plant a single-step breakpoint at
   each one.  */
void
linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct regcache *regcache = get_thread_regcache (thread, 1);

  scoped_restore_current_thread restore_thread;

  switch_to_thread (thread);
  std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);

  for (CORE_ADDR pc : next_pcs)
    set_single_step_breakpoint (pc, current_ptid);
}
3943
df95181f
TBA
3944int
3945linux_process_target::single_step (lwp_info* lwp)
7fe5e27e
AT
3946{
3947 int step = 0;
3948
b31cdfa6 3949 if (supports_hardware_single_step ())
7fe5e27e
AT
3950 {
3951 step = 1;
3952 }
7582c77c 3953 else if (supports_software_single_step ())
7fe5e27e
AT
3954 {
3955 install_software_single_step_breakpoints (lwp);
3956 step = 0;
3957 }
3958 else
c058728c 3959 threads_debug_printf ("stepping is not implemented on this target");
7fe5e27e
AT
3960
3961 return step;
3962}
3963
/* The signal can be delivered to the inferior if we are not trying to
   finish a fast tracepoint collect.  Since signal can be delivered in
   the step-over, the program may go to signal handler and trap again
   after return from the signal handler.  We can live with the spurious
   double traps.  */

static int
lwp_signal_can_be_delivered (struct lwp_info *lwp)
{
  return (lwp->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting);
}
3976
df95181f
TBA
/* Resume execution of LWP.  If STEP is nonzero, single-step it; else
   continue it, deferring to PTRACE_SYSCALL when syscall catchpoints
   are in effect.  SIGNAL is the signal to deliver (0 for none) with
   optional siginfo INFO; it is queued instead of delivered if the LWP
   already has pending signals/status or delivery is currently unsafe.
   Throws (via perror_with_name) if the underlying ptrace resume
   fails; see resume_one_lwp for the non-throwing wrapper.  */
void
linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
					    int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int ptrace_request;
  struct process_info *proc = get_thread_process (thread);

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || !lwp->pending_signals.empty ()
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Not resuming lwp %ld (%s, stop %s); has pending status",
	 lwpid_of (thread), step ? "step" : "continue",
	 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      threads_debug_printf ("  pending reinsert at 0x%s",
			    paddress (lwp->bp_reinsert));

      if (supports_hardware_single_step ())
	{
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    threads_debug_printf
      ("lwp %ld wants to get out of fast tracepoint jump pad "
       "(exit-jump-pad-bkpt)", lwpid_of (thread));

  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      threads_debug_printf
	("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
	 lwpid_of (thread));

      if (supports_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error ("moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      threads_debug_printf
	("lwp %ld has a while-stepping action -> forcing step.",
	 lwpid_of (thread));

      step = single_step (lwp);
    }

  if (proc->tdesc != NULL && low_supports_breakpoints ())
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      lwp->stop_pc = low_get_pc (regcache);

      threads_debug_printf ("  %s from pc 0x%lx", step ? "step" : "continue",
			    (long) lwp->stop_pc);
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
    {
      const pending_signal &p_sig = lwp->pending_signals.front ();

      signal = p_sig.signal;
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals.pop_front ();
    }

  threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
			lwpid_of (thread), step ? "step" : "continue", signal,
			lwp->stop_expected ? "expected" : "not expected");

  low_prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  if (errno)
    {
      int saved_errno = errno;

      threads_debug_printf ("ptrace errno = %d (%s)",
			    saved_errno, strerror (saved_errno));

      errno = saved_errno;
      perror_with_name ("resuming thread");
    }

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
4172
d7599cc0
TBA
/* Default (no-op) implementation of the low-target hook called just
   before an LWP is resumed; architecture-specific targets override
   this to flush per-thread state (e.g. debug registers).  */
void
linux_process_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* Nop.  */
}
4178
23f238d3
PA
/* Called when we try to resume a stopped LWP and that errors out.  If
   the LWP is no longer in ptrace-stopped state (meaning it's zombie,
   or about to become), discard the error, clear any pending status
   the LWP may have, and return true (we'll collect the exit status
   soon enough).  Otherwise, return false.  */

static int
check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
{
  struct thread_info *thread = get_lwp_thread (lp);

  /* If we get an error after resuming the LWP successfully, we'd
     confuse !T state for the LWP being gone.  */
  gdb_assert (lp->stopped);

  /* We can't just check whether the LWP is in 'Z (Zombie)' state,
     because even if ptrace failed with ESRCH, the tracee may be "not
     yet fully dead", but already refusing ptrace requests.  In that
     case the tracee has 'R (Running)' state for a little bit
     (observed in Linux 3.18).  See also the note on ESRCH in the
     ptrace(2) man page.  Instead, check whether the LWP has any state
     other than ptrace-stopped.  */

  /* Don't assume anything if /proc/PID/status can't be read.  */
  if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
    {
      lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
      lp->status_pending_p = 0;
      return 1;
    }
  return 0;
}
4211
df95181f
TBA
/* Like resume_one_lwp_throw, but swallow the error if the resume
   failed because the LWP is gone (zombie or nearly so): in that case
   mark it resumed so its exit event can still be collected.  Any
   other error is re-thrown.  */
void
linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
				      siginfo_t *info)
{
  try
    {
      resume_one_lwp_throw (lwp, step, signal, info);
    }
  catch (const gdb_exception_error &ex)
    {
      if (check_ptrace_stopped_lwp_gone (lwp))
	{
	  /* This could because we tried to resume an LWP after its leader
	     exited.  Mark it as resumed, so we can collect an exit event
	     from it.  */
	  lwp->stopped = 0;
	  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
      else
	throw;
    }
}
4234
5fdda392
SM
/* This function is called once per thread via for_each_thread.
   We look up which resume request applies to THREAD and mark it with a
   pointer to the appropriate resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */

static void
linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  for (int ndx = 0; ndx < n; ndx++)
    {
      ptid_t ptid = resume[ndx].thread;
      if (ptid == minus_one_ptid
	  || ptid == thread->id
	  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
	     of PID'.  */
	  || (ptid.pid () == pid_of (thread)
	      && (ptid.is_pid ()
		  || ptid.lwp () == -1)))
	{
	  /* An explicit stop request for a thread already stopped (or
	     stopping) at GDB's request needs no further action.  */
	  if (resume[ndx].kind == resume_stop
	      && thread->last_resume_kind == resume_stop)
	    {
	      threads_debug_printf
		("already %s LWP %ld at GDB's request",
		 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
		  ? "stopped" : "stopping"),
		 lwpid_of (thread));

	      continue;
	    }

	  /* Ignore (wildcard) resume requests for already-resumed
	     threads.  */
	  if (resume[ndx].kind != resume_stop
	      && thread->last_resume_kind != resume_stop)
	    {
	      threads_debug_printf
		("already %s LWP %ld at GDB's request",
		 (thread->last_resume_kind == resume_step
		  ? "stepping" : "continuing"),
		 lwpid_of (thread));
	      continue;
	    }

	  /* Don't let wildcard resumes resume fork/vfork/clone
	     children that GDB does not yet know are new children.  */
	  if (lwp->relative != NULL)
	    {
	      struct lwp_info *rel = lwp->relative;

	      if (rel->status_pending_p
		  && is_new_child_status (rel->waitstatus.kind ()))
		{
		  threads_debug_printf
		    ("not resuming LWP %ld: has queued stop reply",
		     lwpid_of (thread));
		  continue;
		}
	    }

	  /* If the thread has a pending event that has already been
	     reported to GDBserver core, but GDB has not pulled the
	     event out of the vStopped queue yet, likewise, ignore the
	     (wildcard) resume request.  */
	  if (in_queued_stop_replies (thread->id))
	    {
	      threads_debug_printf
		("not resuming LWP %ld: has queued stop reply",
		 lwpid_of (thread));
	      continue;
	    }

	  lwp->resume = &resume[ndx];
	  thread->last_resume_kind = lwp->resume->kind;

	  lwp->step_range_start = lwp->resume->step_range_start;
	  lwp->step_range_end = lwp->resume->step_range_end;

	  /* If we had a deferred signal to report, dequeue one now.
	     This can happen if LWP gets more than one signal while
	     trying to get out of a jump pad.  */
	  if (lwp->stopped
	      && !lwp->status_pending_p
	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
	    {
	      lwp->status_pending_p = 1;

	      threads_debug_printf
		("Dequeueing deferred signal %d for LWP %ld, "
		 "leaving status pending.",
		 WSTOPSIG (lwp->status_pending),
		 lwpid_of (thread));
	    }

	  return;
	}
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;
}
4341
df95181f
TBA
4342bool
4343linux_process_target::resume_status_pending (thread_info *thread)
5544ad89 4344{
d86d4aaf 4345 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4346
bd99dc85
PA
4347 /* LWPs which will not be resumed are not interesting, because
4348 we might not wait for them next time through linux_wait. */
2bd7c093 4349 if (lwp->resume == NULL)
25c28b4d 4350 return false;
64386c31 4351
df95181f 4352 return thread_still_has_status_pending (thread);
d50171e4
PA
4353}
4354
df95181f
TBA
/* Return true if THREAD is stopped on a breakpoint of ours that must
   be stepped over before it can be resumed (a GDBserver-owned
   breakpoint or fast tracepoint jump at its current PC).  Returns
   false for threads that won't be resumed, are suspended, have a
   pending status, have moved since stopping, or sit on a breakpoint
   GDB itself expects to hit.  */
bool
linux_process_target::thread_needs_step_over (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
			    lwpid_of (thread));
      return false;
    }

  if (thread->last_resume_kind == resume_stop)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, should remain stopped",
	 lwpid_of (thread));
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
			    lwpid_of (thread));
      return false;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending status.",
	 lwpid_of (thread));
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Cancelling, PC was changed.  "
	 "Old stop_pc was 0x%s, PC is now 0x%s", lwpid_of (thread),
	 paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (supports_software_single_step ()
      && !lwp->pending_signals.empty ()
      && lwp_signal_can_be_delivered (lwp))
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending signals.",
	 lwpid_of (thread));

      return false;
    }

  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
				" GDB breakpoint at 0x%s; skipping step over",
				lwpid_of (thread), paddress (pc));

	  return false;
	}
      else
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, "
				"found breakpoint at 0x%s",
				lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  return true;
	}
    }

  threads_debug_printf
    ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
     lwpid_of (thread), paddress (pc));

  return false;
}
4470
d16f3f6c
TBA
/* Begin stepping LWP over the breakpoint at its current PC: stop and
   suspend all other LWPs, remove the breakpoint (and any fast
   tracepoint jump) at the PC, single-step LWP, and record in
   step_over_bkpt that the next event we need is from this LWP.  */
void
linux_process_target::start_step_over (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  CORE_ADDR pc;

  threads_debug_printf ("Starting step-over on LWP %ld.  Stopping all threads",
			lwpid_of (thread));

  stop_all_lwps (1, lwp);

  if (lwp->suspended != 0)
    {
      internal_error ("LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  threads_debug_printf ("Done stopping all threads for step-over.");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  bool step = false;
  {
    scoped_restore_current_thread restore_thread;
    switch_to_thread (thread);

    /* Remember where the breakpoint must be re-inserted once the
       step completes (see finish_step_over).  */
    lwp->bp_reinsert = pc;
    uninsert_breakpoints_at (pc);
    uninsert_fast_tracepoint_jumps_at (pc);

    step = single_step (lwp);
  }

  resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->id;
}
4513
b31cdfa6
TBA
/* Finish a previously started step-over on LWP, if one is pending:
   re-insert the breakpoint/fast-tracepoint jump that was removed,
   delete the temporary single-step breakpoints (software single-step
   targets only), and clear step_over_bkpt.  Returns true if a
   step-over was indeed pending, false otherwise.  */
bool
linux_process_target::finish_step_over (lwp_info *lwp)
{
  if (lwp->bp_reinsert != 0)
    {
      scoped_restore_current_thread restore_thread;

      threads_debug_printf ("Finished step over.");

      switch_to_thread (get_lwp_thread (lwp));

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
	 may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any single-step breakpoints.  No longer needed.  We
	 don't have to worry about other threads hitting this trap,
	 and later not being able to explain it, because we were
	 stepping over a breakpoint, and we hold all threads but
	 LWP stopped while doing that.  */
      if (!supports_hardware_single_step ())
	{
	  gdb_assert (has_single_step_breakpoints (current_thread));
	  delete_single_step_breakpoints (current_thread);
	}

      step_over_bkpt = null_ptid;
      return true;
    }
  else
    return false;
}
4549
/* If a step-over is in progress (step_over_bkpt is set), drain all
   pending events, finish the step-over, and discard the resulting
   SIGTRAP unless the client explicitly asked for a step.  Used when
   detaching so we never leave a half-done step-over behind.  */

void
linux_process_target::complete_ongoing_step_over ()
{
  if (step_over_bkpt != null_ptid)
    {
      struct lwp_info *lwp;
      int wstat;
      int ret;

      threads_debug_printf ("detach: step over in progress, finish it first");

      /* Passing NULL_PTID as filter indicates we want all events to
	 be left pending.  Eventually this returns when there are no
	 unwaited-for children left.  */
      ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
				     __WALL);
      gdb_assert (ret == -1);

      lwp = find_lwp_pid (step_over_bkpt);
      if (lwp != NULL)
	{
	  finish_step_over (lwp);

	  /* If we got our step SIGTRAP, don't leave it pending,
	     otherwise we would report it to GDB as a spurious
	     SIGTRAP.  */
	  gdb_assert (lwp->status_pending_p);
	  if (WIFSTOPPED (lwp->status_pending)
	      && WSTOPSIG (lwp->status_pending) == SIGTRAP)
	    {
	      thread_info *thread = get_lwp_thread (lwp);
	      if (thread->last_resume_kind != resume_step)
		{
		  threads_debug_printf ("detach: discard step-over SIGTRAP");

		  lwp->status_pending_p = 0;
		  lwp->status_pending = 0;
		  resume_one_lwp (lwp, lwp->stepping, 0, NULL);
		}
	      else
		threads_debug_printf
		  ("detach: resume_step, not discarding step-over SIGTRAP");
	    }
	}
      /* NOTE: LWP may be NULL here if the stepping thread exited;
	 unsuspend_all_lwps then unsuspends every LWP.  */
      step_over_bkpt = null_ptid;
      unsuspend_all_lwps (lwp);
    }
}
4598
/* Carry out the resume request previously recorded in
   LWP->resume for THREAD.  If LEAVE_ALL_STOPPED is true (some other
   thread needs a step-over, or has a pending status in all-stop),
   the thread is not actually resumed; any requested signal is only
   queued for later delivery.  */

void
linux_process_target::resume_one_thread (thread_info *thread,
					 bool leave_all_stopped)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int leave_pending;

  /* No resume request recorded for this thread.  */
  if (lwp->resume == NULL)
    return;

  if (lwp->resume->kind == resume_stop)
    {
      threads_debug_printf ("resume_stop request for LWP %ld",
			    lwpid_of (thread));

      if (!lwp->stopped)
	{
	  threads_debug_printf ("stopping LWP %ld", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  threads_debug_printf ("already stopped LWP %ld", lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report.empty ())
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.set_ignore ();
      return;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume it - we can just report the pending status.
     Likewise if it is suspended, because e.g., another thread is
     stepping past a breakpoint.  Make sure to queue any signals that
     would otherwise be sent.  In all-stop mode, we do this decision
     based on if *any* thread has a pending status.  If there's a
     thread that needs the step-over-breakpoint dance, then don't
     resume any other thread but that particular one.  */
  leave_pending = (lwp->suspended
		   || lwp->status_pending_p
		   || leave_all_stopped);

  /* If we have a new signal, enqueue the signal.  */
  if (lwp->resume->sig != 0)
    {
      siginfo_t info, *info_p;

      /* If this is the same signal we were previously stopped by,
	 make sure to queue its siginfo.  */
      if (WIFSTOPPED (lwp->last_status)
	  && WSTOPSIG (lwp->last_status) == lwp->resume->sig
	  && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
		     (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
    }

  if (!leave_pending)
    {
      threads_debug_printf ("resuming LWP %ld", lwpid_of (thread));

      proceed_one_lwp (thread, NULL);
    }
  else
    threads_debug_printf ("leaving LWP %ld stopped", lwpid_of (thread));

  thread->last_status.set_ignore ();
  lwp->resume = NULL;
}
4696
/* Implement the resume target op.  Record the resume request for
   each thread, then decide globally whether to (a) start a step-over
   for a thread stopped at a breakpoint, (b) leave everything stopped
   because some thread has a pending status (all-stop), or (c) resume
   the threads.  Signals are queued even when threads stay stopped.  */

void
linux_process_target::resume (thread_resume *resume_info, size_t n)
{
  struct thread_info *need_step_over = NULL;

  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  /* Match each thread with the resume_info element that applies.  */
  for_each_thread ([&] (thread_info *thread)
    {
      linux_set_resume_request (thread, resume_info, n);
    });

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  bool any_pending = false;
  if (!non_stop)
    any_pending = find_thread ([this] (thread_info *thread)
      {
	return resume_status_pending (thread);
      }) != nullptr;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && low_supports_breakpoints ())
    need_step_over = find_thread ([this] (thread_info *thread)
      {
	return thread_needs_step_over (thread);
      });

  bool leave_all_stopped = (need_step_over != NULL || any_pending);

  if (need_step_over != NULL)
    threads_debug_printf ("Not resuming all, need step over");
  else if (any_pending)
    threads_debug_printf ("Not resuming, all-stop and found "
			  "an LWP with pending status");
  else
    threads_debug_printf ("Resuming, no pending status or step over needed");

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  for_each_thread ([&] (thread_info *thread)
    {
      resume_one_thread (thread, leave_all_stopped);
    });

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  /* We may have events that were pending that can/should be sent to
     the client now.  Trigger a linux_wait call.  */
  if (target_is_async_p ())
    async_file_mark ();
}
4759
/* Resume a single stopped LWP, unless it is EXCEPT, already running,
   suspended, or has a status pending to report.  Re-queues a SIGSTOP
   for threads the client wants stopped, and arranges single-stepping
   (hardware or software) when requested or when finishing a
   step-over.  */

void
linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  if (lwp == except)
    return;

  threads_debug_printf ("lwp %ld", lwpid_of (thread));

  if (!lwp->stopped)
    {
      threads_debug_printf ("   LWP %ld already running", lwpid_of (thread));
      return;
    }

  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
    {
      threads_debug_printf ("   client wants LWP to remain %ld stopped",
			    lwpid_of (thread));
      return;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf ("   LWP %ld has pending status, leaving stopped",
			    lwpid_of (thread));
      return;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      threads_debug_printf ("   LWP %ld is suspended", lwpid_of (thread));
      return;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report.empty ()
      && (lwp->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting))
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      threads_debug_printf
	("Client wants LWP %ld to stop.  Making sure it has a SIGSTOP pending",
	 lwpid_of (thread));

      send_sigstop (lwp);
    }

  if (thread->last_resume_kind == resume_step)
    {
      threads_debug_printf ("   stepping LWP %ld, client wants it stepping",
			    lwpid_of (thread));

      /* If resume_step is requested by GDB, install single-step
	 breakpoints when the thread is about to be actually resumed if
	 the single-step breakpoints weren't removed.  */
      if (supports_software_single_step ()
	  && !has_single_step_breakpoints (thread))
	install_software_single_step_breakpoints (lwp);

      step = maybe_hw_step (thread);
    }
  else if (lwp->bp_reinsert != 0)
    {
      /* Finishing a step-over: keep stepping until past the
	 breakpoint address.  */
      threads_debug_printf ("   stepping LWP %ld, reinsert set",
			    lwpid_of (thread));

      step = maybe_hw_step (thread);
    }
  else
    step = 0;

  resume_one_lwp (lwp, step, 0, NULL);
}
4848
df95181f
TBA
4849void
4850linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4851 lwp_info *except)
7984d532 4852{
d86d4aaf 4853 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
4854
4855 if (lwp == except)
e2b44075 4856 return;
7984d532 4857
863d01bd 4858 lwp_suspended_decr (lwp);
7984d532 4859
e2b44075 4860 proceed_one_lwp (thread, except);
d50171e4
PA
4861}
4862
/* Resume all LWPs; but if some thread is stopped at a breakpoint it
   needs to step over, start that step-over instead and resume
   nothing else.  */

void
linux_process_target::proceed_all_lwps ()
{
  struct thread_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (low_supports_breakpoints ())
    {
      need_step_over = find_thread ([this] (thread_info *thread)
	{
	  return thread_needs_step_over (thread);
	});

      if (need_step_over != NULL)
	{
	  threads_debug_printf ("found thread %ld needing a step-over",
				lwpid_of (need_step_over));

	  start_step_over (get_thread_lwp (need_step_over));
	  return;
	}
    }

  threads_debug_printf ("Proceeding, no step-over needed");

  for_each_thread ([this] (thread_info *thread)
    {
      proceed_one_lwp (thread, NULL);
    });
}
4897
d16f3f6c
TBA
4898void
4899linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
d50171e4 4900{
c058728c
SM
4901 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4902
4903 if (except)
4904 threads_debug_printf ("except=(LWP %ld)",
4905 lwpid_of (get_lwp_thread (except)));
4906 else
4907 threads_debug_printf ("except=nullptr");
5544ad89 4908
7984d532 4909 if (unsuspend)
e2b44075
SM
4910 for_each_thread ([&] (thread_info *thread)
4911 {
4912 unsuspend_and_proceed_one_lwp (thread, except);
4913 });
7984d532 4914 else
e2b44075
SM
4915 for_each_thread ([&] (thread_info *thread)
4916 {
4917 proceed_one_lwp (thread, except);
4918 });
0d62e5e8
DJ
4919}
4920
58caa3dc
DJ
4921
4922#ifdef HAVE_LINUX_REGSETS
4923
1faeff08
MR
4924#define use_linux_regsets 1
4925
030031ee
PA
4926/* Returns true if REGSET has been disabled. */
4927
4928static int
4929regset_disabled (struct regsets_info *info, struct regset_info *regset)
4930{
4931 return (info->disabled_regsets != NULL
4932 && info->disabled_regsets[regset - info->regsets]);
4933}
4934
4935/* Disable REGSET. */
4936
4937static void
4938disable_regset (struct regsets_info *info, struct regset_info *regset)
4939{
4940 int dr_offset;
4941
4942 dr_offset = regset - info->regsets;
4943 if (info->disabled_regsets == NULL)
224c3ddb 4944 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
030031ee
PA
4945 info->disabled_regsets[dr_offset] = 1;
4946}
4947
58caa3dc 4948static int
3aee8918
PA
4949regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4950 struct regcache *regcache)
58caa3dc
DJ
4951{
4952 struct regset_info *regset;
e9d25b98 4953 int saw_general_regs = 0;
95954743 4954 int pid;
1570b33e 4955 struct iovec iov;
58caa3dc 4956
0bfdf32f 4957 pid = lwpid_of (current_thread);
28eef672 4958 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 4959 {
1570b33e
L
4960 void *buf, *data;
4961 int nt_type, res;
58caa3dc 4962
030031ee 4963 if (regset->size == 0 || regset_disabled (regsets_info, regset))
28eef672 4964 continue;
58caa3dc 4965
bca929d3 4966 buf = xmalloc (regset->size);
1570b33e
L
4967
4968 nt_type = regset->nt_type;
4969 if (nt_type)
4970 {
4971 iov.iov_base = buf;
4972 iov.iov_len = regset->size;
4973 data = (void *) &iov;
4974 }
4975 else
4976 data = buf;
4977
dfb64f85 4978#ifndef __sparc__
f15f9948 4979 res = ptrace (regset->get_request, pid,
b8e1b30e 4980 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 4981#else
1570b33e 4982 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 4983#endif
58caa3dc
DJ
4984 if (res < 0)
4985 {
1ef53e6b
AH
4986 if (errno == EIO
4987 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
58caa3dc 4988 {
1ef53e6b
AH
4989 /* If we get EIO on a regset, or an EINVAL and the regset is
4990 optional, do not try it again for this process mode. */
030031ee 4991 disable_regset (regsets_info, regset);
58caa3dc 4992 }
e5a9158d
AA
4993 else if (errno == ENODATA)
4994 {
4995 /* ENODATA may be returned if the regset is currently
4996 not "active". This can happen in normal operation,
4997 so suppress the warning in this case. */
4998 }
fcd4a73d
YQ
4999 else if (errno == ESRCH)
5000 {
5001 /* At this point, ESRCH should mean the process is
5002 already gone, in which case we simply ignore attempts
5003 to read its registers. */
5004 }
58caa3dc
DJ
5005 else
5006 {
0d62e5e8 5007 char s[256];
95954743
PA
5008 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5009 pid);
0d62e5e8 5010 perror (s);
58caa3dc
DJ
5011 }
5012 }
098dbe61
AA
5013 else
5014 {
5015 if (regset->type == GENERAL_REGS)
5016 saw_general_regs = 1;
5017 regset->store_function (regcache, buf);
5018 }
fdeb2a12 5019 free (buf);
58caa3dc 5020 }
e9d25b98
DJ
5021 if (saw_general_regs)
5022 return 0;
5023 else
5024 return 1;
58caa3dc
DJ
5025}
5026
/* Write back all regset-covered registers from REGCACHE to the
   inferior.  Returns 0 if a general-register regset was written,
   1 otherwise (caller must then fall back to PTRACE_POKEUSER).  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip empty, disabled, or read-only (no fill function)
	 regsets.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  /* NT_* regsets are transferred through an iovec.  */
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      /* SPARC's ptrace swaps the last two arguments.  */
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5115
1faeff08 5116#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5117
1faeff08 5118#define use_linux_regsets 0
3aee8918
PA
5119#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5120#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5121
58caa3dc 5122#endif
1faeff08
MR
5123
5124/* Return 1 if register REGNO is supported by one of the regset ptrace
5125 calls or 0 if it has to be transferred individually. */
5126
5127static int
3aee8918 5128linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5129{
5130 unsigned char mask = 1 << (regno % 8);
5131 size_t index = regno / 8;
5132
5133 return (use_linux_regsets
3aee8918
PA
5134 && (regs_info->regset_bitmap == NULL
5135 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5136}
5137
58caa3dc 5138#ifdef HAVE_LINUX_USRREGS
1faeff08 5139
5b3da067 5140static int
3aee8918 5141register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5142{
5143 int addr;
5144
3aee8918 5145 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5146 error ("Invalid register number %d.", regnum);
5147
3aee8918 5148 addr = usrregs->regmap[regnum];
1faeff08
MR
5149
5150 return addr;
5151}
5152
/* Fetch one register (REGNO) from the inferior with PTRACE_PEEKUSER,
   one ptrace word at a time, and supply it to REGCACHE.  Registers
   that cannot be fetched or have no USR offset are silently skipped;
   a read error marks the register unavailable.  */

void
linux_process_target::fetch_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_fetch_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the transfer size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	{
	  /* Mark register REGNO unavailable.  */
	  supply_register (regcache, regno, NULL);
	  return;
	}
    }

  low_supply_ptrace_register (regcache, regno, buf);
}
5197
/* Store one register (REGNO) from REGCACHE into the inferior with
   PTRACE_POKEUSER, one ptrace word at a time.  Registers that cannot
   be stored or have no USR offset are silently skipped.  */

void
linux_process_target::store_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_store_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the transfer size up to a whole number of ptrace words;
     the tail is zero-filled below.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);
  memset (buf, 0, size);

  low_collect_ptrace_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;


	  if (!low_cannot_store_register (regno))
	    error ("writing register %d: %s", regno, safe_strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
daca57a7 5249#endif /* HAVE_LINUX_USRREGS */
1faeff08 5250
/* Default hook: collect register REGNO from REGCACHE into the raw
   ptrace buffer BUF.  Architectures whose ptrace layout differs from
   the regcache layout override this.  */

void
linux_process_target::low_collect_ptrace_register (regcache *regcache,
						   int regno, char *buf)
{
  collect_register (regcache, regno, buf);
}
5257
/* Default hook: supply register REGNO to REGCACHE from the raw
   ptrace buffer BUF.  Architectures whose ptrace layout differs from
   the regcache layout override this.  */

void
linux_process_target::low_supply_ptrace_register (regcache *regcache,
						  int regno, const char *buf)
{
  supply_register (regcache, regno, buf);
}
5264
/* Fetch register REGNO (or all registers if REGNO == -1) via
   PTRACE_PEEKUSER.  When ALL is zero, registers already covered by a
   regset are skipped.  Compiled to a no-op when PTRACE_PEEKUSER is
   unavailable.  */

void
linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
						    regcache *regcache,
						    int regno, int all)
{
#ifdef HAVE_LINUX_USRREGS
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
	if (all || !linux_register_in_regsets (regs_info, regno))
	  fetch_register (usr, regcache, regno);
    }
  else
    fetch_register (usr, regcache, regno);
#endif
}
5283
/* Store register REGNO (or all registers if REGNO == -1) via
   PTRACE_POKEUSER.  When ALL is zero, registers already covered by a
   regset are skipped.  Compiled to a no-op when PTRACE_POKEUSER is
   unavailable.  */

void
linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
						    regcache *regcache,
						    int regno, int all)
{
#ifdef HAVE_LINUX_USRREGS
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
	if (all || !linux_register_in_regsets (regs_info, regno))
	  store_register (usr, regcache, regno);
    }
  else
    store_register (usr, regcache, regno);
#endif
}
1faeff08 5302
/* Implement the fetch_registers target op: read register REGNO (or
   all registers if REGNO == -1) into REGCACHE.  Tries the low
   target's per-register hook first, then regsets, then falls back to
   PTRACE_PEEKUSER for whatever remains.  */

void
linux_process_target::fetch_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      /* Give the low target a shot at each register first.  */
      if (regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  low_fetch_register (regcache, regno);

      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      if (low_fetch_register (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      /* ALL != 0 means the regset path could not provide the general
	 registers; fall back to PTRACE_PEEKUSER.  */
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5333
/* Implement the store_registers target op: write register REGNO (or
   all registers if REGNO == -1) from REGCACHE to the inferior, via
   regsets first, then PTRACE_POKEUSER for whatever remains.  */

void
linux_process_target::store_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      all = regsets_store_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regs_info->regsets_info,
						regcache);
      /* ALL != 0 means the regset path could not write the general
	 registers; fall back to PTRACE_POKEUSER.  */
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5358
/* Default hook: no special per-register fetch handling; returning
   false tells fetch_registers to use the generic regset/USR paths.
   Architecture subclasses override this for special registers.  */

bool
linux_process_target::low_fetch_register (regcache *regcache, int regno)
{
  return false;
}
da6d8c04 5364
/* A wrapper for the read_memory target op.  Dispatches through the
   current target vector; returns 0 on success or an errno value.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  return the_target->read_memory (memaddr, myaddr, len);
}
5372
e2558df3 5373
421490af
PA
5374/* Helper for read_memory/write_memory using /proc/PID/mem. Because
5375 we can use a single read/write call, this can be much more
5376 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
5377 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running threads.
5378 One an only one of READBUF and WRITEBUF is non-null. If READBUF is
5379 not null, then we're reading, otherwise we're writing. */
5380
5381static int
5382proc_xfer_memory (CORE_ADDR memaddr, unsigned char *readbuf,
5383 const gdb_byte *writebuf, int len)
da6d8c04 5384{
421490af 5385 gdb_assert ((readbuf == nullptr) != (writebuf == nullptr));
fd462a61 5386
421490af
PA
5387 process_info *proc = current_process ();
5388
5389 int fd = proc->priv->mem_fd;
5390 if (fd == -1)
5391 return EIO;
5392
5393 while (len > 0)
fd462a61 5394 {
4934b29e
MR
5395 int bytes;
5396
31a56a22
PA
5397 /* Use pread64/pwrite64 if available, since they save a syscall
5398 and can handle 64-bit offsets even on 32-bit platforms (for
5399 instance, SPARC debugging a SPARC64 application). But only
5400 use them if the offset isn't so high that when cast to off_t
5401 it'd be negative, as seen on SPARC64. pread64/pwrite64
5402 outright reject such offsets. lseek does not. */
fd462a61 5403#ifdef HAVE_PREAD64
31a56a22 5404 if ((off_t) memaddr >= 0)
421490af 5405 bytes = (readbuf != nullptr
31a56a22
PA
5406 ? pread64 (fd, readbuf, len, memaddr)
5407 : pwrite64 (fd, writebuf, len, memaddr));
5408 else
fd462a61 5409#endif
31a56a22
PA
5410 {
5411 bytes = -1;
5412 if (lseek (fd, memaddr, SEEK_SET) != -1)
5413 bytes = (readbuf != nullptr
5414 ? read (fd, readbuf, len)
5415 : write (fd, writebuf, len));
5416 }
fd462a61 5417
421490af
PA
5418 if (bytes < 0)
5419 return errno;
5420 else if (bytes == 0)
4934b29e 5421 {
421490af
PA
5422 /* EOF means the address space is gone, the whole process
5423 exited or execed. */
5424 return EIO;
4934b29e 5425 }
da6d8c04 5426
421490af
PA
5427 memaddr += bytes;
5428 if (readbuf != nullptr)
5429 readbuf += bytes;
5430 else
5431 writebuf += bytes;
5432 len -= bytes;
da6d8c04
DJ
5433 }
5434
421490af
PA
5435 return 0;
5436}
c3e735a6 5437
/* Implement the read_memory target op: copy LEN bytes at MEMADDR in
   the inferior into MYADDR via /proc/PID/mem.  Returns 0 on success
   or an errno value.  */

int
linux_process_target::read_memory (CORE_ADDR memaddr,
				   unsigned char *myaddr, int len)
{
  return proc_xfer_memory (memaddr, myaddr, nullptr, len);
}
5444
/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
   memory at MEMADDR.  On failure (cannot write to the inferior)
   returns the value of errno.  Always succeeds if LEN is zero.  */

int
linux_process_target::write_memory (CORE_ADDR memaddr,
				    const unsigned char *myaddr, int len)
{
  if (debug_threads)
    {
      /* Dump up to four bytes.  */
      char str[4 * 2 + 1];
      char *p = str;
      int dump = len < 4 ? len : 4;

      for (int i = 0; i < dump; i++)
	{
	  /* Two hex digits per byte; P always stays within STR.  */
	  sprintf (p, "%02x", myaddr[i]);
	  p += 2;
	}
      *p = '\0';

      threads_debug_printf ("Writing %s to 0x%08lx in process %d",
			    str, (long) memaddr, current_process ()->pid);
    }

  return proc_xfer_memory (memaddr, nullptr, myaddr, len);
}
2f2893d9 5473
/* Implement the look_up_symbols target op: once symbols are
   available, initialize libthread_db-based thread support for the
   current process (at most once).  No-op when built without
   USE_THREAD_DB.  */

void
linux_process_target::look_up_symbols ()
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process.  */
  if (proc->priv->thread_db != NULL)
    return;

  thread_db_init ();
#endif
}
5486
eb497a2a
TBA
5487void
5488linux_process_target::request_interrupt ()
e5379b03 5489{
78708b7c
PA
5490 /* Send a SIGINT to the process group. This acts just like the user
5491 typed a ^C on the controlling terminal. */
4c35c4c6
TV
5492 int res = ::kill (-signal_pid, SIGINT);
5493 if (res == -1)
5494 warning (_("Sending SIGINT to process group of pid %ld failed: %s"),
5495 signal_pid, safe_strerror (errno));
e5379b03
DJ
5496}
5497
eac215cc
TBA
/* The Linux target can always serve qXfer:auxv reads via
   /proc/PID/auxv.  */

bool
linux_process_target::supports_read_auxv ()
{
  return true;
}
5503
aa691b87
RM
5504/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5505 to debugger memory starting at MYADDR. */
5506
eac215cc 5507int
43e5fbd8
TJB
5508linux_process_target::read_auxv (int pid, CORE_ADDR offset,
5509 unsigned char *myaddr, unsigned int len)
aa691b87
RM
5510{
5511 char filename[PATH_MAX];
5512 int fd, n;
5513
6cebaf6e 5514 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5515
5516 fd = open (filename, O_RDONLY);
5517 if (fd < 0)
5518 return -1;
5519
5520 if (offset != (CORE_ADDR) 0
5521 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5522 n = -1;
5523 else
5524 n = read (fd, myaddr, len);
5525
5526 close (fd);
5527
5528 return n;
5529}
5530
7e0bde70
TBA
/* Insert a breakpoint or watchpoint of kind TYPE at ADDR covering
   SIZE bytes.  Software breakpoints are handled generically by
   writing the breakpoint instruction; everything else is delegated
   to the architecture-specific low_insert_point.  Returns 0 on
   success, 1 if unsupported, or an errno-style failure.  */

int
linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
				    int size, raw_breakpoint *bp)
{
  if (type == raw_bkpt_type_sw)
    return insert_memory_breakpoint (bp);
  else
    return low_insert_point (type, addr, size, bp);
}

/* Default implementation for architectures with no hardware
   breakpoint/watchpoint support; overridden by low targets.  */

int
linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}

/* Remove a breakpoint or watchpoint previously inserted by
   insert_point; same dispatch and return conventions.  */

int
linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
				    int size, raw_breakpoint *bp)
{
  if (type == raw_bkpt_type_sw)
    return remove_memory_breakpoint (bp);
  else
    return low_remove_point (type, addr, size, bp);
}

/* Default implementation; overridden by low targets that support
   hardware breakpoints/watchpoints.  */

int
linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}
5566
84320c4e 5567/* Implement the stopped_by_sw_breakpoint target_ops
3e572f71
PA
5568 method. */
5569
84320c4e
TBA
5570bool
5571linux_process_target::stopped_by_sw_breakpoint ()
3e572f71
PA
5572{
5573 struct lwp_info *lwp = get_thread_lwp (current_thread);
5574
5575 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5576}
5577
84320c4e 5578/* Implement the supports_stopped_by_sw_breakpoint target_ops
3e572f71
PA
5579 method. */
5580
84320c4e
TBA
5581bool
5582linux_process_target::supports_stopped_by_sw_breakpoint ()
3e572f71
PA
5583{
5584 return USE_SIGTRAP_SIGINFO;
5585}
5586
93fe88b2 5587/* Implement the stopped_by_hw_breakpoint target_ops
3e572f71
PA
5588 method. */
5589
93fe88b2
TBA
5590bool
5591linux_process_target::stopped_by_hw_breakpoint ()
3e572f71
PA
5592{
5593 struct lwp_info *lwp = get_thread_lwp (current_thread);
5594
5595 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5596}
5597
93fe88b2 5598/* Implement the supports_stopped_by_hw_breakpoint target_ops
3e572f71
PA
5599 method. */
5600
93fe88b2
TBA
5601bool
5602linux_process_target::supports_stopped_by_hw_breakpoint ()
3e572f71
PA
5603{
5604 return USE_SIGTRAP_SIGINFO;
5605}
5606
70b90b91 5607/* Implement the supports_hardware_single_step target_ops method. */
45614f15 5608
22aa6223
TBA
5609bool
5610linux_process_target::supports_hardware_single_step ()
45614f15 5611{
b31cdfa6 5612 return true;
45614f15
YQ
5613}
5614
6eeb5c55
TBA
5615bool
5616linux_process_target::stopped_by_watchpoint ()
e013ee27 5617{
0bfdf32f 5618 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c 5619
15c66dd6 5620 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
e013ee27
OF
5621}
5622
6eeb5c55
TBA
5623CORE_ADDR
5624linux_process_target::stopped_data_address ()
e013ee27 5625{
0bfdf32f 5626 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
5627
5628 return lwp->stopped_data_address;
e013ee27
OF
5629}
5630
db0dfaa0
LM
/* This is only used for targets that define PT_TEXT_ADDR,
   PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
   the target has different ways of acquiring this information, like
   loadmaps.  */

bool
linux_process_target::supports_read_offsets ()
{
#ifdef SUPPORTS_READ_OFFSETS
  return true;
#else
  return false;
#endif
}

/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Returns 1 on success (offsets stored in *TEXT_P
   and *DATA_P), 0 if any of the PTRACE_PEEKUSER reads failed.  */

int
linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#ifdef SUPPORTS_READ_OFFSETS
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  /* PTRACE_PEEKUSER returns the peeked value, so errors are detected
     by clearing errno first and checking it after all three reads.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
#else
  gdb_assert_not_reached ("target op read_offsets not supported");
#endif
}
52fb6437 5686
6e3fd7e9
TBA
/* TLS address lookup requires libthread_db.  */

bool
linux_process_target::supports_get_tls_address ()
{
#ifdef USE_THREAD_DB
  return true;
#else
  return false;
#endif
}

/* Compute the address of thread-local storage OFFSET within
   LOAD_MODULE for THREAD, storing it in *ADDRESS.  Returns 0 on
   success; -1 when thread-db support is not compiled in.  */

int
linux_process_target::get_tls_address (thread_info *thread,
				       CORE_ADDR offset,
				       CORE_ADDR load_module,
				       CORE_ADDR *address)
{
#ifdef USE_THREAD_DB
  return thread_db_get_tls_address (thread, offset, load_module, address);
#else
  return -1;
#endif
}

/* qXfer:osdata is always available on Linux.  */

bool
linux_process_target::supports_qxfer_osdata ()
{
  return true;
}

/* Serve a qXfer:osdata read; writes are not supported and WRITEBUF
   is ignored by the common implementation.  */

int
linux_process_target::qxfer_osdata (const char *annex,
				    unsigned char *readbuf,
				    unsigned const char *writebuf,
				    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
5724
cb63de7c
TBA
/* Convert a siginfo between the ptrace (host) layout and the
   inferior's layout.  DIRECTION is 1 to convert from INF_SIGINFO to
   SIGINFO, anything else to convert the other way.  The low target
   gets first crack, for the 32-bit-inferior-on-64-bit-gdbserver
   case.  */

void
linux_process_target::siginfo_fixup (siginfo_t *siginfo,
				     gdb_byte *inf_siginfo, int direction)
{
  bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);

  /* If there was no callback, or the callback didn't do anything,
     then just do a straight memcpy.  */
  if (!done)
    {
      if (direction == 1)
	memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
      else
	memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
    }
}

/* Default implementation: no layout conversion needed; overridden by
   low targets whose inferior siginfo layout can differ.  */

bool
linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
					 int direction)
{
  return false;
}
5748
d7abedf7
TBA
/* qXfer:siginfo is always available on Linux.  */

bool
linux_process_target::supports_qxfer_siginfo ()
{
  return true;
}

/* Read or write the current thread's siginfo via PTRACE_GETSIGINFO /
   PTRACE_SETSIGINFO.  Exactly one of READBUF/WRITEBUF is non-NULL.
   Returns the number of bytes transferred, or -1 on error.  */

int
linux_process_target::qxfer_siginfo (const char *annex,
				     unsigned char *readbuf,
				     unsigned const char *writebuf,
				     CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  threads_debug_printf ("%s siginfo for lwp %d.",
			readbuf != NULL ? "Reading" : "Writing",
			pid);

  if (offset >= sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the structure.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
5804
bd99dc85
PA
5805/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5806 so we notice when children change state; as the handler for the
5807 sigsuspend in my_waitpid. */
5808
5809static void
5810sigchld_handler (int signo)
5811{
5812 int old_errno = errno;
5813
5814 if (debug_threads)
e581f2b4
PA
5815 {
5816 do
5817 {
a7e559cc
AH
5818 /* Use the async signal safe debug function. */
5819 if (debug_write ("sigchld_handler\n",
5820 sizeof ("sigchld_handler\n") - 1) < 0)
e581f2b4
PA
5821 break; /* just ignore */
5822 } while (0);
5823 }
bd99dc85
PA
5824
5825 if (target_is_async_p ())
5826 async_file_mark (); /* trigger a linux_wait */
5827
5828 errno = old_errno;
5829}
5830
0dc587d4
TBA
/* The Linux target supports non-stop mode.  */

bool
linux_process_target::supports_non_stop ()
{
  return true;
}
5836
0dc587d4
TBA
/* Enable or disable async (non-blocking) target operation, driven by
   an event pipe written from the SIGCHLD handler.  Returns the
   previous async state.  SIGCHLD is blocked while switching modes so
   the handler cannot observe a half-initialized pipe.  */

bool
linux_process_target::async (bool enable)
{
  bool previous = target_is_async_p ();

  threads_debug_printf ("async (%d), previous=%d",
			enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      gdb_sigmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (!linux_event_pipe.open_pipe ())
	    {
	      /* Restore the signal mask before bailing out.  */
	      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe.event_fd (),
			    handle_target_event, NULL,
			    "linux-low");

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe.event_fd ());

	  linux_event_pipe.close_pipe ();
	}

      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
5883
0dc587d4
TBA
5884int
5885linux_process_target::start_non_stop (bool nonstop)
bd99dc85
PA
5886{
5887 /* Register or unregister from event-loop accordingly. */
0dc587d4 5888 target_async (nonstop);
aa96c426 5889
0dc587d4 5890 if (target_is_async_p () != (nonstop != false))
aa96c426
GB
5891 return -1;
5892
bd99dc85
PA
5893 return 0;
5894}
5895
652aef77
TBA
/* The Linux target can debug multiple processes simultaneously.  */

bool
linux_process_target::supports_multi_process ()
{
  return true;
}

/* Check if fork events are supported.  */

bool
linux_process_target::supports_fork_events ()
{
  return true;
}

/* Check if vfork events are supported.  */

bool
linux_process_target::supports_vfork_events ()
{
  return true;
}

/* Return the set of supported thread options.  */

gdb_thread_options
linux_process_target::supported_thread_options ()
{
  return GDB_THREAD_OPTION_CLONE;
}

/* Check if exec events are supported.  */

bool
linux_process_target::supports_exec_events ()
{
  return true;
}
5933
de0d863e
DB
/* Target hook for 'handle_new_gdb_connection'.  Causes a reset of the
   ptrace flags for all inferiors.  This is in case the new GDB connection
   doesn't support the same set of events that the previous one did.  */

void
linux_process_target::handle_new_gdb_connection ()
{
  /* Request that all the lwps reset their ptrace options.  */
  for_each_thread ([] (thread_info *thread)
    {
      struct lwp_info *lwp = get_thread_lwp (thread);

      if (!lwp->stopped)
	{
	  /* Stop the lwp so we can modify its ptrace options.  The
	     flags are actually set later, once it reports the stop.  */
	  lwp->must_set_ptrace_flags = 1;
	  linux_stop_lwp (lwp);
	}
      else
	{
	  /* Already stopped; go ahead and set the ptrace options.  */
	  struct process_info *proc = find_process_pid (pid_of (thread));
	  int options = linux_low_ptrace_options (proc->attached);

	  linux_enable_event_reporting (lwpid_of (thread), options);
	  lwp->must_set_ptrace_flags = 0;
	}
    });
}
5963
55cf3021
TBA
/* Handle a 'monitor' command from GDB.  Delegates to libthread_db's
   handler when built with thread-db support; otherwise reports the
   command as unhandled (0).  */

int
linux_process_target::handle_monitor_command (char *mon)
{
#ifdef USE_THREAD_DB
  return thread_db_handle_monitor_command (mon);
#else
  return 0;
#endif
}

/* Return the CPU core PTID last ran on.  */

int
linux_process_target::core_of_thread (ptid_t ptid)
{
  return linux_common_core_of_thread (ptid);
}
5979
c756403b
TBA
/* Address-space randomization can be disabled on Linux (via
   personality) when starting inferiors.  */

bool
linux_process_target::supports_disable_randomization ()
{
  return true;
}

/* The in-process agent is supported on Linux.  */

bool
linux_process_target::supports_agent ()
{
  return true;
}

/* Range stepping is available either through software single-step or
   through an architecture-specific implementation.  */

bool
linux_process_target::supports_range_stepping ()
{
  if (supports_software_single_step ())
    return true;

  return low_supports_range_stepping ();
}

/* Default implementation; overridden by low targets with hardware
   range-stepping support.  */

bool
linux_process_target::low_supports_range_stepping ()
{
  return false;
}
6006
8247b823
TBA
/* The executable path of a process can be resolved on Linux (via
   /proc).  */

bool
linux_process_target::supports_pid_to_exec_file ()
{
  return true;
}

/* Return the filename of the executable running in process PID.  */

const char *
linux_process_target::pid_to_exec_file (int pid)
{
  return linux_proc_pid_to_exec_file (pid);
}

/* File operations in the inferior's mount namespace are supported.  */

bool
linux_process_target::supports_multifs ()
{
  return true;
}

/* Open FILENAME in PID's mount namespace.  */

int
linux_process_target::multifs_open (int pid, const char *filename,
				    int flags, mode_t mode)
{
  return linux_mntns_open_cloexec (pid, filename, flags, mode);
}

/* Unlink FILENAME in PID's mount namespace.  */

int
linux_process_target::multifs_unlink (int pid, const char *filename)
{
  return linux_mntns_unlink (pid, filename);
}

/* Read the target of symlink FILENAME in PID's mount namespace.  */

ssize_t
linux_process_target::multifs_readlink (int pid, const char *filename,
					char *buf, size_t bufsiz)
{
  return linux_mntns_readlink (pid, filename, buf, bufsiz);
}
6044
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
/* One segment of a no-MMU (DSBT/FDPIC) load map.  */
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PT_GETDSBT
# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PTRACE_GETFDPIC
# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
# endif

/* Load maps can be read on DSBT/FDPIC targets.  */

bool
linux_process_target::supports_read_loadmap ()
{
  return true;
}

/* Copy up to LEN bytes of the load map selected by ANNEX ("exec" or
   "interp"), starting at OFFSET, into MYADDR.  Returns the number of
   bytes copied, or -1 on error.  */

int
linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
				    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  /* The kernel stores a pointer to its loadmap structure in *DATA.  */
  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6126
bc8d3ae4
TBA
/* Syscall catchpoints are available only if the low target supports
   them.  */

bool
linux_process_target::supports_catch_syscall ()
{
  return low_supports_catch_syscall ();
}

/* Default implementation; overridden by low targets that can report
   syscall entry/return.  */

bool
linux_process_target::low_supports_catch_syscall ()
{
  return false;
}
6138
770d8f6a
TBA
/* Return the PC stored in REGCACHE, or 0 when the low target exposes
   no PC access (no breakpoint support).  */

CORE_ADDR
linux_process_target::read_pc (regcache *regcache)
{
  if (!low_supports_breakpoints ())
    return 0;

  return low_get_pc (regcache);
}

/* Store PC into REGCACHE.  Requires low breakpoint support.  */

void
linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (low_supports_breakpoints ());

  low_set_pc (regcache, pc);
}
6155
68119632
TBA
/* The thread_stopped query below is implemented.  */

bool
linux_process_target::supports_thread_stopped ()
{
  return true;
}

/* Return true if THREAD's lwp is currently stopped.  */

bool
linux_process_target::thread_stopped (thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}

/* This exposes stop-all-threads functionality to other modules.  */

void
linux_process_target::pause_all (bool freeze)
{
  stop_all_lwps (freeze, NULL);
}

/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  */

void
linux_process_target::unpause_all (bool unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}
6184
2268b414
JK
6185/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6186
6187static int
6188get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6189 CORE_ADDR *phdr_memaddr, int *num_phdr)
6190{
6191 char filename[PATH_MAX];
6192 int fd;
6193 const int auxv_size = is_elf64
6194 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6195 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6196
6197 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6198
6199 fd = open (filename, O_RDONLY);
6200 if (fd < 0)
6201 return 1;
6202
6203 *phdr_memaddr = 0;
6204 *num_phdr = 0;
6205 while (read (fd, buf, auxv_size) == auxv_size
6206 && (*phdr_memaddr == 0 || *num_phdr == 0))
6207 {
6208 if (is_elf64)
6209 {
6210 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6211
6212 switch (aux->a_type)
6213 {
6214 case AT_PHDR:
6215 *phdr_memaddr = aux->a_un.a_val;
6216 break;
6217 case AT_PHNUM:
6218 *num_phdr = aux->a_un.a_val;
6219 break;
6220 }
6221 }
6222 else
6223 {
6224 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6225
6226 switch (aux->a_type)
6227 {
6228 case AT_PHDR:
6229 *phdr_memaddr = aux->a_un.a_val;
6230 break;
6231 case AT_PHNUM:
6232 *num_phdr = aux->a_un.a_val;
6233 break;
6234 }
6235 }
6236 }
6237
6238 close (fd);
6239
6240 if (*phdr_memaddr == 0 || *num_phdr == 0)
6241 {
6242 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6243 "phdr_memaddr = %ld, phdr_num = %d",
6244 (long) *phdr_memaddr, *num_phdr);
6245 return 2;
6246 }
6247
6248 return 0;
6249}
6250
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  /* Locate the program header table through the auxiliary vector.  */
  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);	/* Basic sanity check.  */
  phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  /* Find PT_DYNAMIC and apply the relocation to get its runtime
     address.  */
  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
6324
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  /* Walk the dynamic section entry by entry until DT_NULL, looking
     for a MIPS rld map entry (returned immediately) or DT_DEBUG
     (remembered and returned at the end).  */
  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
6426
6427/* Read one pointer from MEMADDR in the inferior. */
6428
6429static int
6430read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6431{
485f1ee4
PA
6432 int ret;
6433
6434 /* Go through a union so this works on either big or little endian
6435 hosts, when the inferior's pointer size is smaller than the size
6436 of CORE_ADDR. It is assumed the inferior's endianness is the
6437 same of the superior's. */
6438 union
6439 {
6440 CORE_ADDR core_addr;
6441 unsigned int ui;
6442 unsigned char uc;
6443 } addr;
6444
6445 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6446 if (ret == 0)
6447 {
6448 if (ptr_size == sizeof (CORE_ADDR))
6449 *ptr = addr.core_addr;
6450 else if (ptr_size == sizeof (unsigned int))
6451 *ptr = addr.ui;
6452 else
6453 gdb_assert_not_reached ("unhandled pointer size");
6454 }
6455 return ret;
2268b414
JK
6456}
6457
974387bb
TBA
/* qXfer:libraries-svr4 is implemented below.  */

bool
linux_process_target::supports_qxfer_libraries_svr4 ()
{
  return true;
}
6463
2268b414
JK
/* Field offsets within the inferior's `struct r_debug' /
   `r_debug_extended' and `struct link_map', for one pointer size.  */
struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset of r_debug_extended.r_next.  */
    int r_next_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6490
8d56636a
MM
/* Offsets for a 32-bit (4-byte pointer) inferior.  */
static const link_map_offsets lmo_32bit_offsets =
  {
    0,     /* r_version offset.  */
    4,     /* r_debug.r_map offset.  */
    20,    /* r_debug_extended.r_next.  */
    0,     /* l_addr offset in link_map.  */
    4,     /* l_name offset in link_map.  */
    8,     /* l_ld offset in link_map.  */
    12,    /* l_next offset in link_map.  */
    16     /* l_prev offset in link_map.  */
  };

/* Offsets for a 64-bit (8-byte pointer) inferior.  */
static const link_map_offsets lmo_64bit_offsets =
  {
    0,     /* r_version offset.  */
    8,     /* r_debug.r_map offset.  */
    40,    /* r_debug_extended.r_next.  */
    0,     /* l_addr offset in link_map.  */
    8,     /* l_name offset in link_map.  */
    16,    /* l_ld offset in link_map.  */
    24,    /* l_next offset in link_map.  */
    32     /* l_prev offset in link_map.  */
  };
6514
/* Get the loaded shared libraries from one namespace.  Walks the
   inferior's link_map chain starting at LM_ADDR (whose expected
   predecessor is LM_PREV), appending one <library> XML element per
   entry to DOCUMENT.  LMID identifies the namespace; PTR_SIZE and LMO
   describe the inferior's pointer size and structure layout.  */

static void
read_link_map (std::string &document, CORE_ADDR lmid, CORE_ADDR lm_addr,
	       CORE_ADDR lm_prev, int ptr_size, const link_map_offsets *lmo)
{
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;

  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      /* A back-pointer mismatch means the list is corrupted; stop
	 rather than risk looping forever.  */
      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%s != 0x%s",
		   paddress (lm_prev), paddress (l_prev));
	  break;
	}

      /* Not checking for error because reading may stop before we've got
	 PATH_MAX worth of characters.  */
      libname[0] = '\0';
      linux_read_memory (l_name, libname, sizeof (libname) - 1);
      libname[sizeof (libname) - 1] = '\0';
      if (libname[0] != '\0')
	{
	  string_appendf (document, "<library name=\"");
	  xml_escape_text_append (document, (char *) libname);
	  string_appendf (document, "\" lm=\"0x%s\" l_addr=\"0x%s\" "
			  "l_ld=\"0x%s\" lmid=\"0x%s\"/>",
			  paddress (lm_addr), paddress (l_addr),
			  paddress (l_ld), paddress (lmid));
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }
}
6563
/* Construct qXfer:libraries-svr4:read reply.

   ANNEX optionally carries "lmid", "start" and "prev" addresses that
   restrict the traversal to a single namespace; READBUF receives up to
   LEN bytes of the XML document starting at OFFSET.  Returns the number
   of bytes copied, -1 on error, or -2 for the unsupported write case.  */

int
linux_process_target::qxfer_libraries_svr4 (const char *annex,
					    unsigned char *readbuf,
					    unsigned const char *writebuf,
					    CORE_ADDR offset, int len)
{
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;
  unsigned int machine;
  CORE_ADDR lmid = 0, lm_addr = 0, lm_prev = 0;

  /* This object is read-only; reject writes and require a read buffer.  */
  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Choose between the 32-bit and 64-bit link_map field layouts based
     on the ELF class of the inferior's executable.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  const link_map_offsets *lmo;
  int ptr_size;
  if (is_elf64)
    {
      lmo = &lmo_64bit_offsets;
      ptr_size = 8;
    }
  else
    {
      lmo = &lmo_32bit_offsets;
      ptr_size = 4;
    }

  /* Parse the annex: NAME=HEXADDR pairs separated by ';'.  Recognized
     names are "lmid", "start" and "prev"; unknown pairs are skipped.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int name_len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      name_len = sep - annex;
      if (name_len == 4 && startswith (annex, "lmid"))
	addrp = &lmid;
      else if (name_len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (name_len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  /* Unknown name - skip to the next pair, if any.  */
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  /* Note: the header's '>' is appended later, so that a main-lm
     attribute can still be added below.  */
  std::string document = "<library-list-svr4 version=\"1.0\"";

  /* When the starting LM_ADDR is passed in the annex, only traverse that
     namespace, which is assumed to be identified by LMID.

     Otherwise, start with R_DEBUG and traverse all namespaces we find.  */
  if (lm_addr != 0)
    {
      document += ">";
      read_link_map (document, lmid, lm_addr, lm_prev, ptr_size, lmo);
    }
  else
    {
      if (lm_prev != 0)
	warning ("ignoring prev=0x%s without start", paddress (lm_prev));

      /* We could interpret LMID as 'provide only the libraries for this
	 namespace' but GDB is currently only providing lmid, start, and
	 prev, or nothing.  */
      if (lmid != 0)
	warning ("ignoring lmid=0x%s without start", paddress (lmid));

      /* The r_debug address is cached per-process; compute it on first
	 use.  */
      CORE_ADDR r_debug = priv->r_debug;
      if (r_debug == 0)
	r_debug = priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (r_debug == (CORE_ADDR) -1)
	return -1;

      /* Terminate the header if we end up with an empty list.  */
      if (r_debug == 0)
	document += ">";

      /* Walk the chain of r_debug structures, one per namespace.  */
      while (r_debug != 0)
	{
	  int r_version = 0;
	  if (linux_read_memory (r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0)
	    {
	      warning ("unable to read r_version from 0x%s",
		       paddress (r_debug + lmo->r_version_offset));
	      break;
	    }

	  if (r_version < 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	      break;
	    }

	  if (read_one_ptr (r_debug + lmo->r_map_offset, &lm_addr,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%s",
		       paddress (r_debug + lmo->r_map_offset));
	      break;
	    }

	  /* We read the entire namespace.  */
	  lm_prev = 0;

	  /* The first entry corresponds to the main executable unless the
	     dynamic loader was loaded late by a static executable.  But
	     in such case the main executable does not have PT_DYNAMIC
	     present and we would not have gotten here.  */
	  if (r_debug == priv->r_debug)
	    {
	      if (lm_addr != 0)
		string_appendf (document, " main-lm=\"0x%s\">",
				paddress (lm_addr));
	      else
		document += ">";

	      lm_prev = lm_addr;
	      if (read_one_ptr (lm_addr + lmo->l_next_offset,
				&lm_addr, ptr_size) != 0)
		{
		  warning ("unable to read l_next from 0x%s",
			   paddress (lm_addr + lmo->l_next_offset));
		  break;
		}
	    }

	  read_link_map (document, r_debug, lm_addr, lm_prev, ptr_size, lmo);

	  /* Following r_next only makes sense from version 2 on; older
	     r_debug structures describe a single namespace.  */
	  if (r_version < 2)
	    break;

	  if (read_one_ptr (r_debug + lmo->r_next_offset, &r_debug,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_next from 0x%s",
		       paddress (r_debug + lmo->r_next_offset));
	      break;
	    }
	}
    }

  document += "</library-list-svr4>";

  /* Hand back the requested [OFFSET, OFFSET+LEN) slice of the
     document.  */
  int document_len = document.length ();
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document.data () + offset, len);

  return len;
}
6744
9accd112
MM
6745#ifdef HAVE_LINUX_BTRACE
6746
8263b346
TBA
6747bool
6748linux_process_target::supports_btrace ()
6749{
6750 return true;
6751}
6752
/* Enable branch tracing for thread TP with configuration CONF; thin
   wrapper around linux_enable_btrace.  */

btrace_target_info *
linux_process_target::enable_btrace (thread_info *tp,
				     const btrace_config *conf)
{
  return linux_enable_btrace (tp->id, conf);
}
6759
969c39fb 6760/* See to_disable_btrace target method. */
9accd112 6761
79597bdd
TBA
6762int
6763linux_process_target::disable_btrace (btrace_target_info *tinfo)
969c39fb
MM
6764{
6765 enum btrace_error err;
6766
6767 err = linux_disable_btrace (tinfo);
6768 return (err == BTRACE_ERR_NONE ? 0 : -1);
6769}
6770
bc504a31 6771/* Encode an Intel Processor Trace configuration. */
b20a6524
MM
6772
6773static void
873a185b 6774linux_low_encode_pt_config (std::string *buffer,
b20a6524
MM
6775 const struct btrace_data_pt_config *config)
6776{
873a185b 6777 *buffer += "<pt-config>\n";
b20a6524
MM
6778
6779 switch (config->cpu.vendor)
6780 {
6781 case CV_INTEL:
873a185b
TT
6782 string_xml_appendf (*buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6783 "model=\"%u\" stepping=\"%u\"/>\n",
6784 config->cpu.family, config->cpu.model,
6785 config->cpu.stepping);
b20a6524
MM
6786 break;
6787
6788 default:
6789 break;
6790 }
6791
873a185b 6792 *buffer += "</pt-config>\n";
b20a6524
MM
6793}
6794
6795/* Encode a raw buffer. */
6796
6797static void
873a185b 6798linux_low_encode_raw (std::string *buffer, const gdb_byte *data,
b20a6524
MM
6799 unsigned int size)
6800{
6801 if (size == 0)
6802 return;
6803
268a13a5 6804 /* We use hex encoding - see gdbsupport/rsp-low.h. */
873a185b 6805 *buffer += "<raw>\n";
b20a6524
MM
6806
6807 while (size-- > 0)
6808 {
6809 char elem[2];
6810
6811 elem[0] = tohex ((*data >> 4) & 0xf);
6812 elem[1] = tohex (*data++ & 0xf);
6813
8b2d5ef8 6814 buffer->append (elem, 2);
b20a6524
MM
6815 }
6816
873a185b 6817 *buffer += "</raw>\n";
b20a6524
MM
6818}
6819
969c39fb
MM
6820/* See to_read_btrace target method. */
6821
79597bdd
TBA
6822int
6823linux_process_target::read_btrace (btrace_target_info *tinfo,
873a185b 6824 std::string *buffer,
79597bdd 6825 enum btrace_read_type type)
9accd112 6826{
734b0e4b 6827 struct btrace_data btrace;
969c39fb 6828 enum btrace_error err;
9accd112 6829
969c39fb
MM
6830 err = linux_read_btrace (&btrace, tinfo, type);
6831 if (err != BTRACE_ERR_NONE)
6832 {
6833 if (err == BTRACE_ERR_OVERFLOW)
873a185b 6834 *buffer += "E.Overflow.";
969c39fb 6835 else
873a185b 6836 *buffer += "E.Generic Error.";
969c39fb 6837
8dcc53b3 6838 return -1;
969c39fb 6839 }
9accd112 6840
734b0e4b
MM
6841 switch (btrace.format)
6842 {
6843 case BTRACE_FORMAT_NONE:
873a185b 6844 *buffer += "E.No Trace.";
8dcc53b3 6845 return -1;
734b0e4b
MM
6846
6847 case BTRACE_FORMAT_BTS:
873a185b
TT
6848 *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
6849 *buffer += "<btrace version=\"1.0\">\n";
9accd112 6850
46f29a9a 6851 for (const btrace_block &block : *btrace.variant.bts.blocks)
873a185b
TT
6852 string_xml_appendf (*buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6853 paddress (block.begin), paddress (block.end));
9accd112 6854
873a185b 6855 *buffer += "</btrace>\n";
734b0e4b
MM
6856 break;
6857
b20a6524 6858 case BTRACE_FORMAT_PT:
873a185b
TT
6859 *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
6860 *buffer += "<btrace version=\"1.0\">\n";
6861 *buffer += "<pt>\n";
b20a6524
MM
6862
6863 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
9accd112 6864
b20a6524
MM
6865 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6866 btrace.variant.pt.size);
6867
873a185b
TT
6868 *buffer += "</pt>\n";
6869 *buffer += "</btrace>\n";
b20a6524
MM
6870 break;
6871
6872 default:
873a185b 6873 *buffer += "E.Unsupported Trace Format.";
8dcc53b3 6874 return -1;
734b0e4b 6875 }
969c39fb
MM
6876
6877 return 0;
9accd112 6878}
f4abbc16
MM
6879
6880/* See to_btrace_conf target method. */
6881
79597bdd
TBA
6882int
6883linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
873a185b 6884 std::string *buffer)
f4abbc16
MM
6885{
6886 const struct btrace_config *conf;
6887
873a185b
TT
6888 *buffer += "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n";
6889 *buffer += "<btrace-conf version=\"1.0\">\n";
f4abbc16
MM
6890
6891 conf = linux_btrace_conf (tinfo);
6892 if (conf != NULL)
6893 {
6894 switch (conf->format)
6895 {
6896 case BTRACE_FORMAT_NONE:
6897 break;
6898
6899 case BTRACE_FORMAT_BTS:
873a185b
TT
6900 string_xml_appendf (*buffer, "<bts");
6901 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->bts.size);
6902 string_xml_appendf (*buffer, " />\n");
f4abbc16 6903 break;
b20a6524
MM
6904
6905 case BTRACE_FORMAT_PT:
873a185b
TT
6906 string_xml_appendf (*buffer, "<pt");
6907 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->pt.size);
6908 string_xml_appendf (*buffer, "/>\n");
b20a6524 6909 break;
f4abbc16
MM
6910 }
6911 }
6912
873a185b 6913 *buffer += "</btrace-conf>\n";
f4abbc16
MM
6914 return 0;
6915}
9accd112
MM
6916#endif /* HAVE_LINUX_BTRACE */
6917
7b669087
GB
/* See nat/linux-nat.h.  */

ptid_t
current_lwp_ptid (void)
{
  /* In gdbserver the current LWP is simply the current thread.  */
  return ptid_of (current_thread);
}
6925
7f63b89b
TBA
/* Return the name of thread THREAD; delegates to the procfs-based
   helper.  */

const char *
linux_process_target::thread_name (ptid_t thread)
{
  return linux_proc_tid_get_name (thread);
}
6931
#if USE_THREAD_DB
/* Fetch the thread-local handle for PTID; delegates to the thread-db
   layer.  */
bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  return thread_db_thread_handle (ptid, handle, handle_len);
}
#endif
6940
7b961964
SM
6941thread_info *
6942linux_process_target::thread_pending_parent (thread_info *thread)
6943{
6944 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
6945
6946 if (parent == nullptr)
6947 return nullptr;
6948
6949 return get_lwp_thread (parent);
6950}
6951
df5ad102 6952thread_info *
faf44a31
PA
6953linux_process_target::thread_pending_child (thread_info *thread,
6954 target_waitkind *kind)
df5ad102 6955{
faf44a31 6956 lwp_info *child = get_thread_lwp (thread)->pending_child (kind);
df5ad102
SM
6957
6958 if (child == nullptr)
6959 return nullptr;
6960
6961 return get_lwp_thread (child);
6962}
6963
276d4552
YQ
6964/* Default implementation of linux_target_ops method "set_pc" for
6965 32-bit pc register which is literally named "pc". */
6966
6967void
6968linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
6969{
6970 uint32_t newpc = pc;
6971
6972 supply_register_by_name (regcache, "pc", &newpc);
6973}
6974
6975/* Default implementation of linux_target_ops method "get_pc" for
6976 32-bit pc register which is literally named "pc". */
6977
6978CORE_ADDR
6979linux_get_pc_32bit (struct regcache *regcache)
6980{
6981 uint32_t pc;
6982
6983 collect_register_by_name (regcache, "pc", &pc);
c058728c 6984 threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
276d4552
YQ
6985 return pc;
6986}
6987
6f69e520
YQ
6988/* Default implementation of linux_target_ops method "set_pc" for
6989 64-bit pc register which is literally named "pc". */
6990
6991void
6992linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
6993{
6994 uint64_t newpc = pc;
6995
6996 supply_register_by_name (regcache, "pc", &newpc);
6997}
6998
6999/* Default implementation of linux_target_ops method "get_pc" for
7000 64-bit pc register which is literally named "pc". */
7001
7002CORE_ADDR
7003linux_get_pc_64bit (struct regcache *regcache)
7004{
7005 uint64_t pc;
7006
7007 collect_register_by_name (regcache, "pc", &pc);
c058728c 7008 threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
6f69e520
YQ
7009 return pc;
7010}
7011
0570503d 7012/* See linux-low.h. */
974c89e0 7013
0570503d 7014int
43e5fbd8 7015linux_get_auxv (int pid, int wordsize, CORE_ADDR match, CORE_ADDR *valp)
974c89e0
AH
7016{
7017 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7018 int offset = 0;
7019
7020 gdb_assert (wordsize == 4 || wordsize == 8);
7021
43e5fbd8
TJB
7022 while (the_target->read_auxv (pid, offset, data, 2 * wordsize)
7023 == 2 * wordsize)
974c89e0
AH
7024 {
7025 if (wordsize == 4)
7026 {
0570503d 7027 uint32_t *data_p = (uint32_t *) data;
974c89e0 7028 if (data_p[0] == match)
0570503d
PFC
7029 {
7030 *valp = data_p[1];
7031 return 1;
7032 }
974c89e0
AH
7033 }
7034 else
7035 {
0570503d 7036 uint64_t *data_p = (uint64_t *) data;
974c89e0 7037 if (data_p[0] == match)
0570503d
PFC
7038 {
7039 *valp = data_p[1];
7040 return 1;
7041 }
974c89e0
AH
7042 }
7043
7044 offset += 2 * wordsize;
7045 }
7046
7047 return 0;
7048}
7049
7050/* See linux-low.h. */
7051
7052CORE_ADDR
43e5fbd8 7053linux_get_hwcap (int pid, int wordsize)
974c89e0 7054{
0570503d 7055 CORE_ADDR hwcap = 0;
43e5fbd8 7056 linux_get_auxv (pid, wordsize, AT_HWCAP, &hwcap);
0570503d 7057 return hwcap;
974c89e0
AH
7058}
7059
7060/* See linux-low.h. */
7061
7062CORE_ADDR
43e5fbd8 7063linux_get_hwcap2 (int pid, int wordsize)
974c89e0 7064{
0570503d 7065 CORE_ADDR hwcap2 = 0;
43e5fbd8 7066 linux_get_auxv (pid, wordsize, AT_HWCAP2, &hwcap2);
0570503d 7067 return hwcap2;
974c89e0 7068}
6f69e520 7069
3aee8918
PA
7070#ifdef HAVE_LINUX_REGSETS
7071void
7072initialize_regsets_info (struct regsets_info *info)
7073{
7074 for (info->num_regsets = 0;
7075 info->regsets[info->num_regsets].size >= 0;
7076 info->num_regsets++)
7077 ;
3aee8918
PA
7078}
7079#endif
7080
da6d8c04
DJ
/* One-time initialization of the Linux low-level target.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  /* Install the Linux target as the active target.  */
  set_target_ops (the_linux_target);

  /* Emit any ptrace/procfs capability warnings up front.  */
  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();

  /* Install the SIGCHLD handler.  SA_RESTART keeps interrupted
     syscalls restarting when children change state.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  linux_check_ptrace_features ();
}