]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdbserver/linux-low.cc
Update gdb.Symbol.is_variable documentation
[thirdparty/binutils-gdb.git] / gdbserver / linux-low.cc
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
213516ef 2 Copyright (C) 1995-2023 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
268a13a5 22#include "gdbsupport/agent.h"
de0d863e 23#include "tdesc.h"
cdc8e9b2
JB
24#include "gdbsupport/event-loop.h"
25#include "gdbsupport/event-pipe.h"
268a13a5
TT
26#include "gdbsupport/rsp-low.h"
27#include "gdbsupport/signals-state-save-restore.h"
96d7229d
LM
28#include "nat/linux-nat.h"
29#include "nat/linux-waitpid.h"
268a13a5 30#include "gdbsupport/gdb_wait.h"
5826e159 31#include "nat/gdb_ptrace.h"
125f8a3d
GB
32#include "nat/linux-ptrace.h"
33#include "nat/linux-procfs.h"
8cc73a39 34#include "nat/linux-personality.h"
da6d8c04
DJ
35#include <signal.h>
36#include <sys/ioctl.h>
37#include <fcntl.h>
0a30fbc4 38#include <unistd.h>
fd500816 39#include <sys/syscall.h>
f9387fc3 40#include <sched.h>
07e059b5
VP
41#include <ctype.h>
42#include <pwd.h>
43#include <sys/types.h>
44#include <dirent.h>
53ce3c39 45#include <sys/stat.h>
efcbbd14 46#include <sys/vfs.h>
1570b33e 47#include <sys/uio.h>
268a13a5 48#include "gdbsupport/filestuff.h"
c144c7a0 49#include "tracepoint.h"
276d4552 50#include <inttypes.h>
268a13a5 51#include "gdbsupport/common-inferior.h"
2090129c 52#include "nat/fork-inferior.h"
268a13a5 53#include "gdbsupport/environ.h"
21987b9c 54#include "gdbsupport/gdb-sigmask.h"
268a13a5 55#include "gdbsupport/scoped_restore.h"
957f3f49
DE
56#ifndef ELFMAG0
57/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
58 then ELFMAG0 will have been defined. If it didn't get included by
59 gdb_proc_service.h then including it will likely introduce a duplicate
60 definition of elf_fpregset_t. */
61#include <elf.h>
62#endif
14d2069a 63#include "nat/linux-namespaces.h"
efcbbd14 64
fd462a61
DJ
65#ifndef O_LARGEFILE
66#define O_LARGEFILE 0
67#endif
1a981360 68
69f4c9cc
AH
69#ifndef AT_HWCAP2
70#define AT_HWCAP2 26
71#endif
72
db0dfaa0
LM
73/* Some targets did not define these ptrace constants from the start,
74 so gdbserver defines them locally here. In the future, these may
75 be removed after they are added to asm/ptrace.h. */
76#if !(defined(PT_TEXT_ADDR) \
77 || defined(PT_DATA_ADDR) \
78 || defined(PT_TEXT_END_ADDR))
79#if defined(__mcoldfire__)
80/* These are still undefined in 3.10 kernels. */
81#define PT_TEXT_ADDR 49*4
82#define PT_DATA_ADDR 50*4
83#define PT_TEXT_END_ADDR 51*4
db0dfaa0
LM
84/* These are still undefined in 3.10 kernels. */
85#elif defined(__TMS320C6X__)
86#define PT_TEXT_ADDR (0x10000*4)
87#define PT_DATA_ADDR (0x10004*4)
88#define PT_TEXT_END_ADDR (0x10008*4)
89#endif
90#endif
91
5203ae1e
TBA
92#if (defined(__UCLIBC__) \
93 && defined(HAS_NOMMU) \
94 && defined(PT_TEXT_ADDR) \
95 && defined(PT_DATA_ADDR) \
96 && defined(PT_TEXT_END_ADDR))
97#define SUPPORTS_READ_OFFSETS
98#endif
99
9accd112 100#ifdef HAVE_LINUX_BTRACE
125f8a3d 101# include "nat/linux-btrace.h"
268a13a5 102# include "gdbsupport/btrace-common.h"
9accd112
MM
103#endif
104
8365dcf5
TJB
105#ifndef HAVE_ELF32_AUXV_T
106/* Copied from glibc's elf.h. */
107typedef struct
108{
109 uint32_t a_type; /* Entry type */
110 union
111 {
112 uint32_t a_val; /* Integer value */
113 /* We use to have pointer elements added here. We cannot do that,
114 though, since it does not work when using 32-bit definitions
115 on 64-bit platforms and vice versa. */
116 } a_un;
117} Elf32_auxv_t;
118#endif
119
120#ifndef HAVE_ELF64_AUXV_T
121/* Copied from glibc's elf.h. */
122typedef struct
123{
124 uint64_t a_type; /* Entry type */
125 union
126 {
127 uint64_t a_val; /* Integer value */
128 /* We use to have pointer elements added here. We cannot do that,
129 though, since it does not work when using 32-bit definitions
130 on 64-bit platforms and vice versa. */
131 } a_un;
132} Elf64_auxv_t;
133#endif
134
ded48a5e
YQ
135/* Does the current host support PTRACE_GETREGSET? */
136int have_ptrace_getregset = -1;
137
8a841a35
PA
138/* Return TRUE if THREAD is the leader thread of the process. */
139
140static bool
141is_leader (thread_info *thread)
142{
143 ptid_t ptid = ptid_of (thread);
144 return ptid.pid () == ptid.lwp ();
145}
146
48989498
PA
147/* Return true if we should report thread exit events to GDB, for
148 THR. */
149
150static bool
151report_exit_events_for (thread_info *thr)
152{
153 client_state &cs = get_client_state ();
154
155 return (cs.report_thread_events
156 || (thr->thread_options & GDB_THREAD_OPTION_EXIT) != 0);
157}
158
cff068da
GB
159/* LWP accessors. */
160
161/* See nat/linux-nat.h. */
162
163ptid_t
164ptid_of_lwp (struct lwp_info *lwp)
165{
166 return ptid_of (get_lwp_thread (lwp));
167}
168
169/* See nat/linux-nat.h. */
170
4b134ca1
GB
171void
172lwp_set_arch_private_info (struct lwp_info *lwp,
173 struct arch_lwp_info *info)
174{
175 lwp->arch_private = info;
176}
177
178/* See nat/linux-nat.h. */
179
180struct arch_lwp_info *
181lwp_arch_private_info (struct lwp_info *lwp)
182{
183 return lwp->arch_private;
184}
185
186/* See nat/linux-nat.h. */
187
cff068da
GB
188int
189lwp_is_stopped (struct lwp_info *lwp)
190{
191 return lwp->stopped;
192}
193
194/* See nat/linux-nat.h. */
195
196enum target_stop_reason
197lwp_stop_reason (struct lwp_info *lwp)
198{
199 return lwp->stop_reason;
200}
201
0e00e962
AA
202/* See nat/linux-nat.h. */
203
204int
205lwp_is_stepping (struct lwp_info *lwp)
206{
207 return lwp->stepping;
208}
209
05044653
PA
210/* A list of all unknown processes which receive stop signals. Some
211 other process will presumably claim each of these as forked
212 children momentarily. */
24a09b5f 213
05044653
PA
214struct simple_pid_list
215{
216 /* The process ID. */
217 int pid;
218
219 /* The status as reported by waitpid. */
220 int status;
221
222 /* Next in chain. */
223 struct simple_pid_list *next;
224};
05c309a8 225static struct simple_pid_list *stopped_pids;
05044653
PA
226
227/* Trivial list manipulation functions to keep track of a list of new
228 stopped processes. */
229
230static void
231add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
232{
8d749320 233 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
234
235 new_pid->pid = pid;
236 new_pid->status = status;
237 new_pid->next = *listp;
238 *listp = new_pid;
239}
240
241static int
242pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
243{
244 struct simple_pid_list **p;
245
246 for (p = listp; *p != NULL; p = &(*p)->next)
247 if ((*p)->pid == pid)
248 {
249 struct simple_pid_list *next = (*p)->next;
250
251 *statusp = (*p)->status;
252 xfree (*p);
253 *p = next;
254 return 1;
255 }
256 return 0;
257}
24a09b5f 258
bde24c0a
PA
259enum stopping_threads_kind
260 {
261 /* Not stopping threads presently. */
262 NOT_STOPPING_THREADS,
263
264 /* Stopping threads. */
265 STOPPING_THREADS,
266
267 /* Stopping and suspending threads. */
268 STOPPING_AND_SUSPENDING_THREADS
269 };
270
271/* This is set while stop_all_lwps is in effect. */
6bd434d6 272static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
273
274/* FIXME make into a target method? */
24a09b5f 275int using_threads = 1;
24a09b5f 276
fa593d66
PA
277/* True if we're presently stabilizing threads (moving them out of
278 jump pads). */
279static int stabilizing_threads;
280
f50bf8e5 281static void unsuspend_all_lwps (struct lwp_info *except);
e8a625d1
PA
282static void mark_lwp_dead (struct lwp_info *lwp, int wstat,
283 bool thread_event);
00db26fa 284static int lwp_is_marked_dead (struct lwp_info *lwp);
d50171e4 285static int kill_lwp (unsigned long lwpid, int signo);
863d01bd 286static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
ece66d65 287static int linux_low_ptrace_options (int attached);
ced2dffb 288static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
d50171e4 289
582511be
PA
290/* When the event-loop is doing a step-over, this points at the thread
291 being stepped. */
6bd434d6 292static ptid_t step_over_bkpt;
582511be 293
bf9ae9d8
TBA
294bool
295linux_process_target::low_supports_breakpoints ()
296{
297 return false;
298}
d50171e4 299
bf9ae9d8
TBA
300CORE_ADDR
301linux_process_target::low_get_pc (regcache *regcache)
302{
303 return 0;
304}
305
306void
307linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
d50171e4 308{
bf9ae9d8 309 gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
d50171e4 310}
0d62e5e8 311
7582c77c
TBA
312std::vector<CORE_ADDR>
313linux_process_target::low_get_next_pcs (regcache *regcache)
314{
315 gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
316 "implemented");
317}
318
d4807ea2
TBA
319int
320linux_process_target::low_decr_pc_after_break ()
321{
322 return 0;
323}
324
c2d6af84
PA
325/* True if LWP is stopped in its stepping range. */
326
327static int
328lwp_in_step_range (struct lwp_info *lwp)
329{
330 CORE_ADDR pc = lwp->stop_pc;
331
332 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
333}
334
cdc8e9b2
JB
335/* The event pipe registered as a waitable file in the event loop. */
336static event_pipe linux_event_pipe;
bd99dc85
PA
337
338/* True if we're currently in async mode. */
cdc8e9b2 339#define target_is_async_p() (linux_event_pipe.is_open ())
bd99dc85 340
02fc4de7 341static void send_sigstop (struct lwp_info *lwp);
bd99dc85 342
d0722149
DE
343/* Return non-zero if HEADER is a 64-bit ELF file. */
344
345static int
214d508e 346elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
d0722149 347{
214d508e
L
348 if (header->e_ident[EI_MAG0] == ELFMAG0
349 && header->e_ident[EI_MAG1] == ELFMAG1
350 && header->e_ident[EI_MAG2] == ELFMAG2
351 && header->e_ident[EI_MAG3] == ELFMAG3)
352 {
353 *machine = header->e_machine;
354 return header->e_ident[EI_CLASS] == ELFCLASS64;
355
356 }
357 *machine = EM_NONE;
358 return -1;
d0722149
DE
359}
360
361/* Return non-zero if FILE is a 64-bit ELF file,
362 zero if the file is not a 64-bit ELF file,
363 and -1 if the file is not accessible or doesn't exist. */
364
be07f1a2 365static int
214d508e 366elf_64_file_p (const char *file, unsigned int *machine)
d0722149 367{
957f3f49 368 Elf64_Ehdr header;
d0722149
DE
369 int fd;
370
371 fd = open (file, O_RDONLY);
372 if (fd < 0)
373 return -1;
374
375 if (read (fd, &header, sizeof (header)) != sizeof (header))
376 {
377 close (fd);
378 return 0;
379 }
380 close (fd);
381
214d508e 382 return elf_64_header_p (&header, machine);
d0722149
DE
383}
384
be07f1a2
PA
385/* Accepts an integer PID; Returns true if the executable PID is
386 running is a 64-bit ELF file.. */
387
388int
214d508e 389linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 390{
d8d2a3ee 391 char file[PATH_MAX];
be07f1a2
PA
392
393 sprintf (file, "/proc/%d/exe", pid);
214d508e 394 return elf_64_file_p (file, machine);
be07f1a2
PA
395}
396
fd000fb3
TBA
397void
398linux_process_target::delete_lwp (lwp_info *lwp)
bd99dc85 399{
fa96cb38
PA
400 struct thread_info *thr = get_lwp_thread (lwp);
401
c058728c 402 threads_debug_printf ("deleting %ld", lwpid_of (thr));
fa96cb38
PA
403
404 remove_thread (thr);
466eecee 405
fd000fb3 406 low_delete_thread (lwp->arch_private);
466eecee 407
013e3554 408 delete lwp;
bd99dc85
PA
409}
410
fd000fb3
TBA
411void
412linux_process_target::low_delete_thread (arch_lwp_info *info)
413{
414 /* Default implementation should be overridden if architecture-specific
415 info is being used. */
416 gdb_assert (info == nullptr);
417}
95954743 418
421490af
PA
419/* Open the /proc/PID/mem file for PROC. */
420
421static void
422open_proc_mem_file (process_info *proc)
423{
424 gdb_assert (proc->priv->mem_fd == -1);
425
426 char filename[64];
427 xsnprintf (filename, sizeof filename, "/proc/%d/mem", proc->pid);
428
429 proc->priv->mem_fd
430 = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
431}
432
fd000fb3 433process_info *
421490af 434linux_process_target::add_linux_process_no_mem_file (int pid, int attached)
95954743
PA
435{
436 struct process_info *proc;
437
95954743 438 proc = add_process (pid, attached);
8d749320 439 proc->priv = XCNEW (struct process_info_private);
95954743 440
fd000fb3 441 proc->priv->arch_private = low_new_process ();
421490af
PA
442 proc->priv->mem_fd = -1;
443
444 return proc;
445}
446
aa5ca48f 447
421490af
PA
448process_info *
449linux_process_target::add_linux_process (int pid, int attached)
450{
451 process_info *proc = add_linux_process_no_mem_file (pid, attached);
452 open_proc_mem_file (proc);
95954743
PA
453 return proc;
454}
455
f551c8ef
SM
456void
457linux_process_target::remove_linux_process (process_info *proc)
458{
459 if (proc->priv->mem_fd >= 0)
460 close (proc->priv->mem_fd);
461
462 this->low_delete_process (proc->priv->arch_private);
463
464 xfree (proc->priv);
465 proc->priv = nullptr;
466
467 remove_process (proc);
468}
469
fd000fb3
TBA
470arch_process_info *
471linux_process_target::low_new_process ()
472{
473 return nullptr;
474}
475
476void
477linux_process_target::low_delete_process (arch_process_info *info)
478{
479 /* Default implementation must be overridden if architecture-specific
480 info exists. */
481 gdb_assert (info == nullptr);
482}
483
484void
485linux_process_target::low_new_fork (process_info *parent, process_info *child)
486{
487 /* Nop. */
488}
489
797bcff5
TBA
490void
491linux_process_target::arch_setup_thread (thread_info *thread)
94585166 492{
24583e45
TBA
493 scoped_restore_current_thread restore_thread;
494 switch_to_thread (thread);
94585166 495
797bcff5 496 low_arch_setup ();
94585166
DB
497}
498
d16f3f6c
TBA
499int
500linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
501 int wstat)
24a09b5f 502{
c12a5089 503 client_state &cs = get_client_state ();
94585166 504 struct lwp_info *event_lwp = *orig_event_lwp;
89a5711c 505 int event = linux_ptrace_get_extended_event (wstat);
de0d863e 506 struct thread_info *event_thr = get_lwp_thread (event_lwp);
24a09b5f 507
183be222 508 gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
65706a29 509
82075af2
JS
510 /* All extended events we currently use are mid-syscall. Only
511 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
512 you have to be using PTRACE_SEIZE to get that. */
513 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
514
c269dbdb
DB
515 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
516 || (event == PTRACE_EVENT_CLONE))
24a09b5f
DJ
517 {
518 unsigned long new_pid;
05044653 519 int ret, status;
24a09b5f 520
de0d863e 521 /* Get the pid of the new lwp. */
d86d4aaf 522 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
56f7af9c 523 &new_pid);
24a09b5f
DJ
524
525 /* If we haven't already seen the new PID stop, wait for it now. */
05044653 526 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
24a09b5f
DJ
527 {
528 /* The new child has a pending SIGSTOP. We can't affect it until it
529 hits the SIGSTOP, but we're already attached. */
530
97438e3f 531 ret = my_waitpid (new_pid, &status, __WALL);
24a09b5f
DJ
532
533 if (ret == -1)
534 perror_with_name ("waiting for new child");
535 else if (ret != new_pid)
536 warning ("wait returned unexpected PID %d", ret);
da5898ce 537 else if (!WIFSTOPPED (status))
24a09b5f
DJ
538 warning ("wait returned unexpected status 0x%x", status);
539 }
540
393a6b59 541 if (debug_threads)
de0d863e 542 {
393a6b59
PA
543 debug_printf ("HEW: Got %s event from LWP %ld, new child is %ld\n",
544 (event == PTRACE_EVENT_FORK ? "fork"
545 : event == PTRACE_EVENT_VFORK ? "vfork"
546 : event == PTRACE_EVENT_CLONE ? "clone"
547 : "???"),
548 ptid_of (event_thr).lwp (),
549 new_pid);
550 }
551
552 ptid_t child_ptid = (event != PTRACE_EVENT_CLONE
553 ? ptid_t (new_pid, new_pid)
554 : ptid_t (ptid_of (event_thr).pid (), new_pid));
de0d863e 555
393a6b59
PA
556 lwp_info *child_lwp = add_lwp (child_ptid);
557 gdb_assert (child_lwp != NULL);
558 child_lwp->stopped = 1;
559 if (event != PTRACE_EVENT_CLONE)
560 child_lwp->must_set_ptrace_flags = 1;
561 child_lwp->status_pending_p = 0;
de0d863e 562
393a6b59 563 thread_info *child_thr = get_lwp_thread (child_lwp);
de0d863e 564
393a6b59
PA
565 /* If we're suspending all threads, leave this one suspended
566 too. If the fork/clone parent is stepping over a breakpoint,
567 all other threads have been suspended already. Leave the
568 child suspended too. */
569 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
570 || event_lwp->bp_reinsert != 0)
571 {
572 threads_debug_printf ("leaving child suspended");
573 child_lwp->suspended = 1;
574 }
575
576 if (event_lwp->bp_reinsert != 0
577 && supports_software_single_step ()
578 && event == PTRACE_EVENT_VFORK)
579 {
580 /* If we leave single-step breakpoints there, child will
581 hit it, so uninsert single-step breakpoints from parent
582 (and child). Once vfork child is done, reinsert
583 them back to parent. */
584 uninsert_single_step_breakpoints (event_thr);
585 }
586
587 if (event != PTRACE_EVENT_CLONE)
588 {
de0d863e
DB
589 /* Add the new process to the tables and clone the breakpoint
590 lists of the parent. We need to do this even if the new process
591 will be detached, since we will need the process object and the
592 breakpoints to remove any breakpoints from memory when we
593 detach, and the client side will access registers. */
393a6b59 594 process_info *child_proc = add_linux_process (new_pid, 0);
de0d863e 595 gdb_assert (child_proc != NULL);
863d01bd 596
393a6b59 597 process_info *parent_proc = get_thread_process (event_thr);
de0d863e 598 child_proc->attached = parent_proc->attached;
2e7b624b 599
63c40ec7 600 clone_all_breakpoints (child_thr, event_thr);
de0d863e 601
51a948fd
AB
602 target_desc_up tdesc = allocate_target_description ();
603 copy_target_description (tdesc.get (), parent_proc->tdesc);
604 child_proc->tdesc = tdesc.release ();
de0d863e 605
3a8a0396 606 /* Clone arch-specific process data. */
fd000fb3 607 low_new_fork (parent_proc, child_proc);
393a6b59 608 }
3a8a0396 609
393a6b59
PA
610 /* Save fork/clone info in the parent thread. */
611 if (event == PTRACE_EVENT_FORK)
612 event_lwp->waitstatus.set_forked (child_ptid);
613 else if (event == PTRACE_EVENT_VFORK)
614 event_lwp->waitstatus.set_vforked (child_ptid);
615 else if (event == PTRACE_EVENT_CLONE
616 && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
617 event_lwp->waitstatus.set_thread_cloned (child_ptid);
618
619 if (event != PTRACE_EVENT_CLONE
620 || (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
621 {
de0d863e 622 /* The status_pending field contains bits denoting the
393a6b59
PA
623 extended event, so when the pending event is handled, the
624 handler will look at lwp->waitstatus. */
de0d863e
DB
625 event_lwp->status_pending_p = 1;
626 event_lwp->status_pending = wstat;
627
393a6b59
PA
628 /* Link the threads until the parent's event is passed on to
629 GDB. */
630 event_lwp->relative = child_lwp;
631 child_lwp->relative = event_lwp;
de0d863e
DB
632 }
633
393a6b59
PA
634 /* If the parent thread is doing step-over with single-step
635 breakpoints, the list of single-step breakpoints are cloned
636 from the parent's. Remove them from the child process.
637 In case of vfork, we'll reinsert them back once vforked
638 child is done. */
639 if (event_lwp->bp_reinsert != 0
640 && supports_software_single_step ())
641 {
642 /* The child process is forked and stopped, so it is safe
643 to access its memory without stopping all other threads
644 from other processes. */
645 delete_single_step_breakpoints (child_thr);
e27d73f6 646
393a6b59
PA
647 gdb_assert (has_single_step_breakpoints (event_thr));
648 gdb_assert (!has_single_step_breakpoints (child_thr));
649 }
bde24c0a 650
da5898ce
DJ
651 /* Normally we will get the pending SIGSTOP. But in some cases
652 we might get another signal delivered to the group first.
f21cc1a2 653 If we do get another signal, be sure not to lose it. */
20ba1ce6 654 if (WSTOPSIG (status) != SIGSTOP)
da5898ce 655 {
393a6b59
PA
656 child_lwp->stop_expected = 1;
657 child_lwp->status_pending_p = 1;
658 child_lwp->status_pending = status;
da5898ce 659 }
393a6b59 660 else if (event == PTRACE_EVENT_CLONE && cs.report_thread_events)
65706a29 661 {
393a6b59
PA
662 child_lwp->waitstatus.set_thread_created ();
663 child_lwp->status_pending_p = 1;
664 child_lwp->status_pending = status;
65706a29 665 }
de0d863e 666
393a6b59
PA
667 if (event == PTRACE_EVENT_CLONE)
668 {
a0aad537 669#ifdef USE_THREAD_DB
393a6b59 670 thread_db_notice_clone (event_thr, child_ptid);
a0aad537 671#endif
393a6b59 672 }
86299109 673
393a6b59
PA
674 if (event == PTRACE_EVENT_CLONE
675 && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) == 0)
676 {
677 threads_debug_printf
678 ("not reporting clone event from LWP %ld, new child is %ld\n",
679 ptid_of (event_thr).lwp (),
680 new_pid);
681 return 1;
682 }
683
684 /* Leave the child stopped until GDB processes the parent
685 event. */
686 child_thr->last_resume_kind = resume_stop;
687 child_thr->last_status.set_stopped (GDB_SIGNAL_0);
688
689 /* Report the event. */
690 threads_debug_printf
691 ("reporting %s event from LWP %ld, new child is %ld\n",
692 (event == PTRACE_EVENT_FORK ? "fork"
693 : event == PTRACE_EVENT_VFORK ? "vfork"
694 : event == PTRACE_EVENT_CLONE ? "clone"
695 : "???"),
696 ptid_of (event_thr).lwp (),
697 new_pid);
698 return 0;
24a09b5f 699 }
c269dbdb
DB
700 else if (event == PTRACE_EVENT_VFORK_DONE)
701 {
183be222 702 event_lwp->waitstatus.set_vfork_done ();
c269dbdb 703
7582c77c 704 if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
2e7b624b 705 {
3b9a79ef 706 reinsert_single_step_breakpoints (event_thr);
2e7b624b 707
3b9a79ef 708 gdb_assert (has_single_step_breakpoints (event_thr));
2e7b624b
YQ
709 }
710
c269dbdb
DB
711 /* Report the event. */
712 return 0;
713 }
c12a5089 714 else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
94585166
DB
715 {
716 struct process_info *proc;
f27866ba 717 std::vector<int> syscalls_to_catch;
94585166
DB
718 ptid_t event_ptid;
719 pid_t event_pid;
720
c058728c
SM
721 threads_debug_printf ("Got exec event from LWP %ld",
722 lwpid_of (event_thr));
94585166
DB
723
724 /* Get the event ptid. */
725 event_ptid = ptid_of (event_thr);
e99b03dc 726 event_pid = event_ptid.pid ();
94585166 727
82075af2 728 /* Save the syscall list from the execing process. */
94585166 729 proc = get_thread_process (event_thr);
f27866ba 730 syscalls_to_catch = std::move (proc->syscalls_to_catch);
82075af2
JS
731
732 /* Delete the execing process and all its threads. */
d16f3f6c 733 mourn (proc);
24583e45 734 switch_to_thread (nullptr);
94585166
DB
735
736 /* Create a new process/lwp/thread. */
fd000fb3 737 proc = add_linux_process (event_pid, 0);
94585166
DB
738 event_lwp = add_lwp (event_ptid);
739 event_thr = get_lwp_thread (event_lwp);
740 gdb_assert (current_thread == event_thr);
797bcff5 741 arch_setup_thread (event_thr);
94585166
DB
742
743 /* Set the event status. */
183be222
SM
744 event_lwp->waitstatus.set_execd
745 (make_unique_xstrdup
746 (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));
94585166
DB
747
748 /* Mark the exec status as pending. */
749 event_lwp->stopped = 1;
750 event_lwp->status_pending_p = 1;
751 event_lwp->status_pending = wstat;
752 event_thr->last_resume_kind = resume_continue;
183be222 753 event_thr->last_status.set_ignore ();
94585166 754
82075af2
JS
755 /* Update syscall state in the new lwp, effectively mid-syscall too. */
756 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
757
758 /* Restore the list to catch. Don't rely on the client, which is free
759 to avoid sending a new list when the architecture doesn't change.
760 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
f27866ba 761 proc->syscalls_to_catch = std::move (syscalls_to_catch);
82075af2 762
94585166
DB
763 /* Report the event. */
764 *orig_event_lwp = event_lwp;
765 return 0;
766 }
de0d863e 767
f34652de 768 internal_error (_("unknown ptrace event %d"), event);
24a09b5f
DJ
769}
770
df95181f
TBA
771CORE_ADDR
772linux_process_target::get_pc (lwp_info *lwp)
d50171e4 773{
a9deee17
PA
774 process_info *proc = get_thread_process (get_lwp_thread (lwp));
775 gdb_assert (!proc->starting_up);
d50171e4 776
bf9ae9d8 777 if (!low_supports_breakpoints ())
d50171e4
PA
778 return 0;
779
24583e45
TBA
780 scoped_restore_current_thread restore_thread;
781 switch_to_thread (get_lwp_thread (lwp));
d50171e4 782
a9deee17
PA
783 struct regcache *regcache = get_thread_regcache (current_thread, 1);
784 CORE_ADDR pc = low_get_pc (regcache);
d50171e4 785
c058728c 786 threads_debug_printf ("pc is 0x%lx", (long) pc);
d50171e4 787
d50171e4
PA
788 return pc;
789}
790
9eedd27d
TBA
791void
792linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
82075af2 793{
82075af2
JS
794 struct regcache *regcache;
795
24583e45
TBA
796 scoped_restore_current_thread restore_thread;
797 switch_to_thread (get_lwp_thread (lwp));
82075af2
JS
798
799 regcache = get_thread_regcache (current_thread, 1);
9eedd27d 800 low_get_syscall_trapinfo (regcache, sysno);
82075af2 801
c058728c 802 threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
82075af2
JS
803}
804
9eedd27d
TBA
805void
806linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
807{
808 /* By default, report an unknown system call number. */
809 *sysno = UNKNOWN_SYSCALL;
810}
811
df95181f
TBA
812bool
813linux_process_target::save_stop_reason (lwp_info *lwp)
0d62e5e8 814{
582511be
PA
815 CORE_ADDR pc;
816 CORE_ADDR sw_breakpoint_pc;
3e572f71
PA
817#if USE_SIGTRAP_SIGINFO
818 siginfo_t siginfo;
819#endif
d50171e4 820
bf9ae9d8 821 if (!low_supports_breakpoints ())
df95181f 822 return false;
0d62e5e8 823
a9deee17
PA
824 process_info *proc = get_thread_process (get_lwp_thread (lwp));
825 if (proc->starting_up)
826 {
827 /* Claim we have the stop PC so that the caller doesn't try to
828 fetch it itself. */
829 return true;
830 }
831
582511be 832 pc = get_pc (lwp);
d4807ea2 833 sw_breakpoint_pc = pc - low_decr_pc_after_break ();
d50171e4 834
582511be 835 /* breakpoint_at reads from the current thread. */
24583e45
TBA
836 scoped_restore_current_thread restore_thread;
837 switch_to_thread (get_lwp_thread (lwp));
47c0c975 838
3e572f71
PA
839#if USE_SIGTRAP_SIGINFO
840 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
841 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
842 {
843 if (siginfo.si_signo == SIGTRAP)
844 {
e7ad2f14
PA
845 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
846 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
3e572f71 847 {
e7ad2f14
PA
848 /* The si_code is ambiguous on this arch -- check debug
849 registers. */
850 if (!check_stopped_by_watchpoint (lwp))
851 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
852 }
853 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
854 {
855 /* If we determine the LWP stopped for a SW breakpoint,
856 trust it. Particularly don't check watchpoint
857 registers, because at least on s390, we'd find
858 stopped-by-watchpoint as long as there's a watchpoint
859 set. */
3e572f71 860 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
3e572f71 861 }
e7ad2f14 862 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
3e572f71 863 {
e7ad2f14
PA
864 /* This can indicate either a hardware breakpoint or
865 hardware watchpoint. Check debug registers. */
866 if (!check_stopped_by_watchpoint (lwp))
867 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
3e572f71 868 }
2bf6fb9d
PA
869 else if (siginfo.si_code == TRAP_TRACE)
870 {
e7ad2f14
PA
871 /* We may have single stepped an instruction that
872 triggered a watchpoint. In that case, on some
873 architectures (such as x86), instead of TRAP_HWBKPT,
874 si_code indicates TRAP_TRACE, and we need to check
875 the debug registers separately. */
876 if (!check_stopped_by_watchpoint (lwp))
877 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
2bf6fb9d 878 }
3e572f71
PA
879 }
880 }
881#else
582511be
PA
882 /* We may have just stepped a breakpoint instruction. E.g., in
883 non-stop mode, GDB first tells the thread A to step a range, and
884 then the user inserts a breakpoint inside the range. In that
8090aef2
PA
885 case we need to report the breakpoint PC. */
886 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
d7146cda 887 && low_breakpoint_at (sw_breakpoint_pc))
e7ad2f14
PA
888 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
889
890 if (hardware_breakpoint_inserted_here (pc))
891 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
892
893 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
894 check_stopped_by_watchpoint (lwp);
895#endif
896
897 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
582511be 898 {
c058728c
SM
899 threads_debug_printf
900 ("%s stopped by software breakpoint",
901 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
582511be
PA
902
903 /* Back up the PC if necessary. */
904 if (pc != sw_breakpoint_pc)
e7ad2f14 905 {
582511be
PA
906 struct regcache *regcache
907 = get_thread_regcache (current_thread, 1);
bf9ae9d8 908 low_set_pc (regcache, sw_breakpoint_pc);
582511be
PA
909 }
910
e7ad2f14
PA
911 /* Update this so we record the correct stop PC below. */
912 pc = sw_breakpoint_pc;
582511be 913 }
e7ad2f14 914 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
c058728c
SM
915 threads_debug_printf
916 ("%s stopped by hardware breakpoint",
917 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
e7ad2f14 918 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
c058728c
SM
919 threads_debug_printf
920 ("%s stopped by hardware watchpoint",
921 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
e7ad2f14 922 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
c058728c
SM
923 threads_debug_printf
924 ("%s stopped by trace",
925 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
e7ad2f14
PA
926
927 lwp->stop_pc = pc;
df95181f 928 return true;
0d62e5e8 929}
ce3a066d 930
fd000fb3
TBA
931lwp_info *
932linux_process_target::add_lwp (ptid_t ptid)
611cb4a5 933{
c360a473 934 lwp_info *lwp = new lwp_info;
0d62e5e8 935
754e3168
AH
936 lwp->thread = add_thread (ptid, lwp);
937
fd000fb3 938 low_new_thread (lwp);
aa5ca48f 939
54a0b537 940 return lwp;
0d62e5e8 941}
611cb4a5 942
fd000fb3
TBA
943void
944linux_process_target::low_new_thread (lwp_info *info)
945{
946 /* Nop. */
947}
948
2090129c
SDJ
949/* Callback to be used when calling fork_inferior, responsible for
950 actually initiating the tracing of the inferior. */
951
static void
linux_ptrace_fun ()
{
  /* Runs in the child after fork: request tracing by the parent.
     Any failure here aborts inferior startup via
     trace_start_error_with_name (which reports errno and exits).  */
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  /* Put the inferior in its own process group, so terminal signals
     (e.g. ^C) don't hit gdbserver itself.  */
  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      /* Closing fd 0 first makes the subsequent open() reuse it.  */
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
980
da6d8c04 981/* Start an inferior process and returns its pid.
2090129c
SDJ
982 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
983 are its arguments. */
da6d8c04 984
15295543
TBA
/* Start PROGRAM with PROGRAM_ARGS under ptrace control and return its
   pid.  Implements the create_inferior target op.  */

int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    /* Scoped: personality is restored when this object goes out of
       scope, i.e. right after fork_inferior returns.  */
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    /* linux_ptrace_fun runs in the child and does PTRACE_TRACEME.  */
    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  /* When spawning a new process, we can't open the mem file yet.  We
     still have to nurse the process through the shell, and that execs
     a couple times.  The address space a /proc/PID/mem file is
     accessing is destroyed on exec.  */
  process_info *proc = add_linux_process_no_mem_file (pid, 0);

  ptid = ptid_t (pid, pid);
  new_lwp = add_lwp (ptid);
  /* Ptrace event-reporting options are set lazily, on the first stop
     (see post_create_inferior).  */
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  /* PROC is now past the shell running the program we want, so we can
     open the /proc/PID/mem file.  */
  open_proc_mem_file (proc);

  return pid;
}
1023
ece66d65
JS
1024/* Implement the post_create_inferior target_ops method. */
1025
6dee9afb
TBA
void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  /* Now that the inferior exists and has stopped, determine its
     architecture and register layout.  */
  low_arch_setup ();

  /* Set the ptrace event-reporting options that were deferred at
     add_lwp time (the flags can only be set on a stopped tracee).  */
  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}
1042
/* Attach to the LWP named by PTID with PTRACE_ATTACH.  Returns 0 on
   success, or the errno from the failed PTRACE_ATTACH otherwise.  */

int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      threads_debug_printf ("Attached to a stopped process");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
1123
8784d563
PA
1124/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1125 already attached. Returns true if a new LWP is found, false
1126 otherwise. */
1127
static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  (Only attach if we don't already track
     it.)  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      threads_debug_printf ("Found new lwp %d", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	threads_debug_printf
	  ("Cannot attach to lwp %d: thread is gone (%d: %s)",
	   lwpid, err, safe_strerror (err));
      else if (err != 0)
	{
	  /* Any other failure is worth telling the user about.  */
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      /* Non-zero tells linux_proc_attach_tgid_threads to rescan, in
	 case attaching caused yet more threads to appear.  */
      return 1;
    }
  return 0;
}
1162
500c1d85
PA
1163static void async_file_mark (void);
1164
e3deef73
LM
1165/* Attach to PID. If PID is the tgid, attach to it and all
1166 of its threads. */
1167
ef03dad8
TBA
/* Attach to process PID and, if PID is the tgid, to all of its
   threads.  Returns 0 on success; calls error() on failure.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid);
  int err;

  /* Delay opening the /proc/PID/mem file until we've successfully
     attached.  */
  proc = add_linux_process_no_mem_file (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      /* Undo the process registration made above before reporting.  */
      this->remove_linux_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  open_proc_mem_file (proc);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid));
  gdb_assert (initial_thread != nullptr);
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      /* Wait for the first stop of any LWP in the process.  */
      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));
      gdb_assert (lwp != nullptr);

      /* Anything other than the expected SIGSTOP must be reported to
	 GDB later, so record it as pending.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
1243
/* Return non-zero if the process with id PID has exactly one known
   thread left, i.e. scanning the thread list finds no second thread
   belonging to PID.  */

static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
    {
      if (!seen_one)
	{
	  /* This is the first thread of this process we see.  */
	  seen_one = true;
	  return false;
	}
      else
	{
	  /* This is the second thread of this process we see.  */
	  return true;
	}
    });

  /* find_thread returned NULL iff no second thread was found.  */
  return thread == NULL;
}
1266
da84f473
PA
1267/* Kill LWP. */
1268
/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      /* Save errno before threads_debug_printf can clobber it.  */
      int save_errno = errno;

      threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }
}
1310
e76126e8
PA
1311/* Kill LWP and wait for it to die. */
1312
/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      /* ECHILD may mean this is a clone LWP; retry with __WCLONE.  */
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1352
578290ec 1353/* Callback for `for_each_thread'. Kills an lwp of a given process,
da84f473 1354 except the leader. */
95954743 1355
578290ec
SM
/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader (the lwp whose id equals PID), which is reaped
   separately by the caller.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      threads_debug_printf ("is last of process %s",
			    target_pid_to_str (thread->id).c_str ());
      return;
    }

  kill_wait_lwp (lwp);
}
1375
c6885a57
TBA
/* Kill process PROCESS and all of its LWPs.  Non-leader LWPs are
   killed first; the leader is reaped last (see the kernel-bug note in
   kill_one_lwp_callback).  Always returns 0.  */

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    threads_debug_printf ("cannot find lwp for pid: %d", pid);
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1406
9b224c5e
PA
1407/* Get pending signal of THREAD, for detaching purposes. This is the
1408 signal the thread last stopped for, which we need to deliver to the
1409 thread when detaching, otherwise, it'd be suppressed/lost. */
1410
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   0 when there is nothing to deliver.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
	  || thread->last_status.sig () == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      threads_debug_printf ("lwp %s had stopped with extended "
			    "status: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  /* Honor GDB's pass/nopass signal configuration when known.  */
  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      threads_debug_printf ("lwp %s had signal %s, "
			    "but we don't know if we should pass it. "
			    "Default to not.",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      threads_debug_printf ("lwp %s has pending signal %s: delivering it",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));

      /* Return the host signal number, as that is what
	 PTRACE_DETACH wants.  */
      return WSTOPSIG (status);
    }
}
1482
fd000fb3
TBA
/* Detach from LWP: cancel any pending SIGSTOP, deliver the signal it
   last stopped for (if any), PTRACE_DETACH, and delete the lwp from
   our bookkeeping.  Handles the lwp having become zombie behind our
   back by reaping it instead.  */

void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      threads_debug_printf ("Sending SIGCONT to %s",
			    target_pid_to_str (ptid_of (thread)).c_str ());

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)).c_str (),
		 safe_strerror (save_errno));
	}
    }
  else
    threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
			  target_pid_to_str (ptid_of (thread)).c_str (),
			  strsignal (sig));

  delete_lwp (lwp);
}
1561
9061c9cf
TBA
/* Detach from PROCESS and all of its LWPs, non-leaders first.
   Always returns 0.  */

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* If there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  /* Now detach from the leader.  */
  main_lwp = find_lwp_pid (ptid_t (process->pid));
  gdb_assert (main_lwp != nullptr);
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1611
/* Implementation of the mourn target op.  Forget process PROC
   entirely: its thread_db state, all of its LWPs, and the process
   itself.  */
1613
8adb37b9
TBA
void
linux_process_target::mourn (process_info *process)
{
#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  /* Delete every lwp belonging to PROCESS from the lwp list.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Finally drop the process itself.  */
  this->remove_linux_process (process);
}
1628
95a49a39
TBA
1629void
1630linux_process_target::join (int pid)
444d6139 1631{
444d6139
PA
1632 int status, ret;
1633
1634 do {
d105de22 1635 ret = my_waitpid (pid, &status, 0);
444d6139
PA
1636 if (WIFEXITED (status) || WIFSIGNALED (status))
1637 break;
1638 } while (ret != -1 || errno != ECHILD);
1639}
1640
13d3d99b
TBA
1641/* Return true if the given thread is still alive. */
1642
1643bool
1644linux_process_target::thread_alive (ptid_t ptid)
0d62e5e8 1645{
95954743
PA
1646 struct lwp_info *lwp = find_lwp_pid (ptid);
1647
1648 /* We assume we always know if a thread exits. If a whole process
1649 exited but we still haven't been able to report it to GDB, we'll
1650 hold on to the last lwp of the dead process. */
1651 if (lwp != NULL)
00db26fa 1652 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1653 else
1654 return 0;
1655}
1656
df95181f
TBA
/* Return true if THREAD's pending stop status is still interesting to
   report.  A pending breakpoint stop is discarded (and the thread
   silently re-resumed by the caller) if the PC moved since the stop,
   or — without SIGTRAP siginfo support — if the breakpoint that
   caused it has since been removed.  */

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      /* Breakpoint checks below are relative to THREAD; restore the
	 previous thread on scope exit.  */
      scoped_restore_current_thread restore_thread;
      switch_to_thread (thread);

      if (pc != lp->stop_pc)
	{
	  threads_debug_printf ("PC of %ld changed",
				lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !low_breakpoint_at (pc))
	{
	  threads_debug_printf ("previous SW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  threads_debug_printf ("previous HW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
#endif

      if (discard)
	{
	  threads_debug_printf ("discarding pending breakpoint status");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1713
a681f9c9
PA
1714/* Returns true if LWP is resumed from the client's perspective. */
1715
1716static int
1717lwp_resumed (struct lwp_info *lwp)
1718{
1719 struct thread_info *thread = get_lwp_thread (lwp);
1720
1721 if (thread->last_resume_kind != resume_stop)
1722 return 1;
1723
1724 /* Did gdb send us a `vCont;t', but we haven't reported the
1725 corresponding stop to gdb yet? If so, the thread is still
1726 resumed/running from gdb's perspective. */
1727 if (thread->last_resume_kind == resume_stop
183be222 1728 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
a681f9c9
PA
1729 return 1;
1730
1731 return 0;
1732}
1733
df95181f
TBA
/* Predicate used when scanning for an lwp with a reportable stop:
   true iff THREAD matches PTID, is resumed from the client's view,
   and its pending status survived re-validation.  A stale pending
   breakpoint status causes the lwp to be quietly re-resumed here.  */

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
						 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      /* Pending status went stale; resume the lwp the way it was
	 going (stepping or continuing) with no signal.  */
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
1757
95954743
PA
1758struct lwp_info *
1759find_lwp_pid (ptid_t ptid)
1760{
d4895ba2
SM
1761 long lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1762 thread_info *thread = find_thread ([lwp] (thread_info *thr_arg)
454296a2 1763 {
da4ae14a 1764 return thr_arg->id.lwp () == lwp;
454296a2 1765 });
d86d4aaf
DE
1766
1767 if (thread == NULL)
1768 return NULL;
1769
9c80ecd6 1770 return get_thread_lwp (thread);
95954743
PA
1771}
1772
fa96cb38 1773/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1774
fa96cb38
PA
1775static int
1776num_lwps (int pid)
1777{
fa96cb38 1778 int count = 0;
0d62e5e8 1779
4d3bb80e
SM
1780 for_each_thread (pid, [&] (thread_info *thread)
1781 {
9c80ecd6 1782 count++;
4d3bb80e 1783 });
3aee8918 1784
fa96cb38
PA
1785 return count;
1786}
d61ddec4 1787
6d4ee8c6
GB
1788/* See nat/linux-nat.h. */
1789
/* See nat/linux-nat.h.  Walk lwps matching FILTER, invoking CALLBACK
   on each; return the first lwp for which CALLBACK returns true, or
   NULL if none does.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
    {
      lwp_info *lwp = get_thread_lwp (thr_arg);

      return callback (lwp);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}
1806
/* Check each process for a thread-group leader that has turned zombie
   while other threads remain, and either mark it dead (when the
   client wants exit events) or delete it.  Returns true if a new
   pending event was created.  */

bool
linux_process_target::check_zombie_leaders ()
{
  bool new_pending_event = false;

  for_each_process ([&] (process_info *proc)
    {
      pid_t leader_pid = pid_of (proc);
      lwp_info *leader_lp = find_lwp_pid (ptid_t (leader_pid));

      threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
			    "num_lwps=%d, zombie=%d",
			    leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
			    linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL && !leader_lp->stopped
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  Note this
	     isn't a watertight check.  If the inferior is
	     multi-threaded and is exiting, it may be we see the
	     leader as zombie before we reap all the non-leader
	     threads.  See comments below.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A zombie leader in a multi-threaded program can mean one
	     of three things:

	     #1 - Only the leader exited, not the whole program, e.g.,
	     with pthread_exit.  Since we can't reap the leader's exit
	     status until all other threads are gone and reaped too,
	     we want to delete the zombie leader right away, as it
	     can't be debugged, we can't read its registers, etc.
	     This is the main reason we check for zombie leaders
	     disappearing.

	     #2 - The whole thread-group/process exited (a group exit,
	     via e.g. exit(3), and there is (or will be shortly) an
	     exit reported for each thread in the process, and then
	     finally an exit for the leader once the non-leaders are
	     reaped.

	     #3 - There are 3 or more threads in the group, and a
	     thread other than the leader exec'd.  See comments on
	     exec events at the top of the file.

	     Ideally we would never delete the leader for case #2.
	     Instead, we want to collect the exit status of each
	     non-leader thread, and then finally collect the exit
	     status of the leader as normal and use its exit code as
	     whole-process exit code.  Unfortunately, there's no
	     race-free way to distinguish cases #1 and #2.  We can't
	     assume the exit events for the non-leaders threads are
	     already pending in the kernel, nor can we assume the
	     non-leader threads are in zombie state already.  Between
	     the leader becoming zombie and the non-leaders exiting
	     and becoming zombie themselves, there's a small time
	     window, so such a check would be racy.  Temporarily
	     pausing all threads and checking to see if all threads
	     exit or not before re-resuming them would work in the
	     case that all threads are running right now, but it
	     wouldn't work if some thread is currently already
	     ptrace-stopped, e.g., due to scheduler-locking.

	     So what we do is we delete the leader anyhow, and then
	     later on when we see its exit status, we re-add it back.
	     We also make sure that we only report a whole-process
	     exit when we see the leader exiting, as opposed to when
	     the last LWP in the LWP list exits, which can be a
	     non-leader if we deleted the leader here.  */
	  threads_debug_printf ("Thread group leader %d zombie "
				"(it exited, or another thread execd), "
				"deleting it.",
				leader_pid);

	  thread_info *leader_thread = get_lwp_thread (leader_lp);
	  if (report_exit_events_for (leader_thread))
	    {
	      /* Keep the lwp, but mark it dead with a synthesized
		 zero exit status so the event gets reported.  */
	      mark_lwp_dead (leader_lp, W_EXITCODE (0, 0), true);
	      new_pending_event = true;
	    }
	  else
	    delete_lwp (leader_lp);
	}
    });

  return new_pending_event;
}
c3adc08c 1895
a1385b7b
SM
1896/* Callback for `find_thread'. Returns the first LWP that is not
1897 stopped. */
d50171e4 1898
a1385b7b
SM
1899static bool
1900not_stopped_callback (thread_info *thread, ptid_t filter)
fa96cb38 1901{
a1385b7b
SM
1902 if (!thread->id.matches (filter))
1903 return false;
47c0c975 1904
a1385b7b 1905 lwp_info *lwp = get_thread_lwp (thread);
fa96cb38 1906
a1385b7b 1907 return !lwp->stopped;
0d62e5e8 1908}
611cb4a5 1909
863d01bd
PA
1910/* Increment LWP's suspend count. */
1911
1912static void
1913lwp_suspended_inc (struct lwp_info *lwp)
1914{
1915 lwp->suspended++;
1916
c058728c
SM
1917 if (lwp->suspended > 4)
1918 threads_debug_printf
1919 ("LWP %ld has a suspiciously high suspend count, suspended=%d",
1920 lwpid_of (get_lwp_thread (lwp)), lwp->suspended);
863d01bd
PA
1921}
1922
1923/* Decrement LWP's suspend count. */
1924
1925static void
1926lwp_suspended_decr (struct lwp_info *lwp)
1927{
1928 lwp->suspended--;
1929
1930 if (lwp->suspended < 0)
1931 {
1932 struct thread_info *thread = get_lwp_thread (lwp);
1933
f34652de 1934 internal_error ("unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
863d01bd
PA
1935 lwp->suspended);
1936 }
1937}
1938
219f2f23
PA
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  /* Handle tracepoint breakpoints inserted in the inferior (e.g., the
     collection and jump-pad related breakpoints).  */
  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads
	      || (lwp->collecting_fast_tracepoint
		  != fast_tpoint_collect_result::not_collecting));

  if (tpoint_related_event)
    {
      threads_debug_printf ("got a tracepoint event");
      return 1;
    }

  return 0;
}
1987
13e567af
TBA
1988fast_tpoint_collect_result
1989linux_process_target::linux_fast_tracepoint_collecting
1990 (lwp_info *lwp, fast_tpoint_collect_status *status)
fa593d66
PA
1991{
1992 CORE_ADDR thread_area;
d86d4aaf 1993 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66 1994
fa593d66
PA
1995 /* Get the thread area address. This is used to recognize which
1996 thread is which when tracing with the in-process agent library.
1997 We don't read anything from the address, and treat it as opaque;
1998 it's the address itself that we assume is unique per-thread. */
13e567af 1999 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
229d26fc 2000 return fast_tpoint_collect_result::not_collecting;
fa593d66
PA
2001
2002 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2003}
2004
13e567af
TBA
/* Base implementation of low_get_thread_area.  Always returns -1,
   which callers (see linux_fast_tracepoint_collecting) take to mean
   the thread area address could not be obtained.  */

int
linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  return -1;
}
2010
d16f3f6c
TBA
/* Check whether LWP, stopped with wait status *WSTAT (or with no
   status if WSTAT is NULL), is midway through a fast tracepoint
   collect inside the jump pad.  Returns true if the LWP is collecting
   and should be left to finish (setting a breakpoint past the
   relocated instruction if it hasn't been executed yet).  If the LWP
   got a synchronous fault signal (SIGILL/SIGFPE/SIGSEGV/SIGBUS) while
   executing the relocated original instruction, rewinds the PC to the
   tracepoint address, fixes up si_addr, and releases any trace buffer
   lock before returning false.  */

bool
linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
{
  /* The helpers below operate on the current thread; restore the
     previous one when we leave.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;

      threads_debug_printf
	("Checking whether LWP %ld needs to move out of the jump pad.",
	 lwpid_of (current_thread));

      fast_tpoint_collect_result r
	= linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != fast_tpoint_collect_result::not_collecting)
	    {
	      if (r == fast_tpoint_collect_result::before_insn
		  && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      threads_debug_printf
		("Checking whether LWP %ld needs to move out of the jump pad..."
		 " it does", lwpid_of (current_thread));

	      return true;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint
	    = fast_tpoint_collect_result::not_collecting;

	  if (r != fast_tpoint_collect_result::not_collecting
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread, 1);
	      low_set_pc (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      threads_debug_printf
		("Cancelling fast exit-jump-pad: removing bkpt."
		 "stopping all threads momentarily.");

	      /* Other running threads could hit this breakpoint.
		 Pause everything while we remove it.  */
	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  threads_debug_printf
    ("Checking whether LWP %ld needs to move out of the jump pad... no",
     lwpid_of (current_thread));

  return false;
}
2127
2128/* Enqueue one signal in the "signals to report later when out of the
2129 jump pad" list. */
2130
2131static void
2132enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2133{
d86d4aaf 2134 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66 2135
c058728c
SM
2136 threads_debug_printf ("Deferring signal %d for LWP %ld.",
2137 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2138
2139 if (debug_threads)
2140 {
013e3554 2141 for (const auto &sig : lwp->pending_signals_to_report)
c058728c 2142 threads_debug_printf (" Already queued %d", sig.signal);
fa593d66 2143
c058728c 2144 threads_debug_printf (" (no more currently queued signals)");
fa593d66
PA
2145 }
2146
1a981360
PA
2147 /* Don't enqueue non-RT signals if they are already in the deferred
2148 queue. (SIGSTOP being the easiest signal to see ending up here
2149 twice) */
2150 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2151 {
013e3554 2152 for (const auto &sig : lwp->pending_signals_to_report)
1a981360 2153 {
013e3554 2154 if (sig.signal == WSTOPSIG (*wstat))
1a981360 2155 {
c058728c
SM
2156 threads_debug_printf
2157 ("Not requeuing already queued non-RT signal %d for LWP %ld",
2158 sig.signal, lwpid_of (thread));
1a981360
PA
2159 return;
2160 }
2161 }
2162 }
2163
013e3554 2164 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
8d749320 2165
d86d4aaf 2166 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
013e3554 2167 &lwp->pending_signals_to_report.back ().info);
fa593d66
PA
2168}
2169
2170/* Dequeue one signal from the "signals to report later when out of
2171 the jump pad" list. */
2172
2173static int
2174dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2175{
d86d4aaf
DE
2176 struct thread_info *thread = get_lwp_thread (lwp);
2177
013e3554 2178 if (!lwp->pending_signals_to_report.empty ())
fa593d66 2179 {
013e3554 2180 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
fa593d66 2181
013e3554
TBA
2182 *wstat = W_STOPCODE (p_sig.signal);
2183 if (p_sig.info.si_signo != 0)
d86d4aaf 2184 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
013e3554
TBA
2185 &p_sig.info);
2186
2187 lwp->pending_signals_to_report.pop_front ();
fa593d66 2188
c058728c
SM
2189 threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
2190 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2191
2192 if (debug_threads)
2193 {
013e3554 2194 for (const auto &sig : lwp->pending_signals_to_report)
c058728c 2195 threads_debug_printf (" Still queued %d", sig.signal);
fa593d66 2196
c058728c 2197 threads_debug_printf (" (no more queued signals)");
fa593d66
PA
2198 }
2199
2200 return 1;
2201 }
2202
2203 return 0;
2204}
2205
ac1bbaca
TBA
2206bool
2207linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
582511be 2208{
24583e45
TBA
2209 scoped_restore_current_thread restore_thread;
2210 switch_to_thread (get_lwp_thread (child));
d50171e4 2211
ac1bbaca
TBA
2212 if (low_stopped_by_watchpoint ())
2213 {
2214 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2215 child->stopped_data_address = low_stopped_data_address ();
2216 }
582511be 2217
ac1bbaca
TBA
2218 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2219}
d50171e4 2220
ac1bbaca
TBA
/* Base implementation: report that the stop was not caused by a
   watchpoint.  */

bool
linux_process_target::low_stopped_by_watchpoint ()
{
  return false;
}
d50171e4 2226
ac1bbaca
TBA
/* Base implementation: no watchpoint data address is available.  */

CORE_ADDR
linux_process_target::low_stopped_data_address ()
{
  return 0;
}
2232
de0d863e
DB
2233/* Return the ptrace options that we want to try to enable. */
2234
2235static int
2236linux_low_ptrace_options (int attached)
2237{
c12a5089 2238 client_state &cs = get_client_state ();
de0d863e
DB
2239 int options = 0;
2240
2241 if (!attached)
2242 options |= PTRACE_O_EXITKILL;
2243
c12a5089 2244 if (cs.report_fork_events)
de0d863e
DB
2245 options |= PTRACE_O_TRACEFORK;
2246
c12a5089 2247 if (cs.report_vfork_events)
c269dbdb
DB
2248 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2249
c12a5089 2250 if (cs.report_exec_events)
94585166
DB
2251 options |= PTRACE_O_TRACEEXEC;
2252
82075af2
JS
2253 options |= PTRACE_O_TRACESYSGOOD;
2254
de0d863e
DB
2255 return options;
2256}
2257
1a48f002 2258void
d16f3f6c 2259linux_process_target::filter_event (int lwpid, int wstat)
fa96cb38
PA
2260{
2261 struct lwp_info *child;
2262 struct thread_info *thread;
582511be 2263 int have_stop_pc = 0;
fa96cb38 2264
f2907e49 2265 child = find_lwp_pid (ptid_t (lwpid));
fa96cb38 2266
5406bc3f
PA
2267 /* Check for events reported by anything not in our LWP list. */
2268 if (child == nullptr)
94585166 2269 {
5406bc3f
PA
2270 if (WIFSTOPPED (wstat))
2271 {
2272 if (WSTOPSIG (wstat) == SIGTRAP
2273 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2274 {
2275 /* A non-leader thread exec'ed after we've seen the
2276 leader zombie, and removed it from our lists (in
2277 check_zombie_leaders). The non-leader thread changes
2278 its tid to the tgid. */
2279 threads_debug_printf
2280 ("Re-adding thread group leader LWP %d after exec.",
2281 lwpid);
94585166 2282
5406bc3f
PA
2283 child = add_lwp (ptid_t (lwpid, lwpid));
2284 child->stopped = 1;
2285 switch_to_thread (child->thread);
2286 }
2287 else
2288 {
2289 /* A process we are controlling has forked and the new
2290 child's stop was reported to us by the kernel. Save
2291 its PID and go back to waiting for the fork event to
2292 be reported - the stopped process might be returned
2293 from waitpid before or after the fork event is. */
2294 threads_debug_printf
2295 ("Saving LWP %d status %s in stopped_pids list",
2296 lwpid, status_to_str (wstat).c_str ());
2297 add_to_pid_list (&stopped_pids, lwpid, wstat);
2298 }
2299 }
2300 else
2301 {
2302 /* Don't report an event for the exit of an LWP not in our
2303 list, i.e. not part of any inferior we're debugging.
2304 This can happen if we detach from a program we originally
8a841a35
PA
2305 forked and then it exits. However, note that we may have
2306 earlier deleted a leader of an inferior we're debugging,
2307 in check_zombie_leaders. Re-add it back here if so. */
2308 find_process ([&] (process_info *proc)
2309 {
2310 if (proc->pid == lwpid)
2311 {
2312 threads_debug_printf
2313 ("Re-adding thread group leader LWP %d after exit.",
2314 lwpid);
2315
2316 child = add_lwp (ptid_t (lwpid, lwpid));
2317 return true;
2318 }
2319 return false;
2320 });
5406bc3f 2321 }
94585166 2322
5406bc3f
PA
2323 if (child == nullptr)
2324 return;
fa96cb38 2325 }
fa96cb38
PA
2326
2327 thread = get_lwp_thread (child);
2328
2329 child->stopped = 1;
2330
2331 child->last_status = wstat;
2332
582511be
PA
2333 /* Check if the thread has exited. */
2334 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2335 {
c058728c 2336 threads_debug_printf ("%d exited", lwpid);
f50bf8e5
YQ
2337
2338 if (finish_step_over (child))
2339 {
2340 /* Unsuspend all other LWPs, and set them back running again. */
2341 unsuspend_all_lwps (child);
2342 }
2343
8a841a35
PA
2344 /* If this is not the leader LWP, then the exit signal was not
2345 the end of the debugged application and should be ignored,
2346 unless GDB wants to hear about thread exits. */
48989498 2347 if (report_exit_events_for (thread) || is_leader (thread))
582511be 2348 {
65706a29
PA
2349 /* Since events are serialized to GDB core, and we can't
2350 report this one right now. Leave the status pending for
2351 the next time we're able to report it. */
e8a625d1 2352 mark_lwp_dead (child, wstat, false);
1a48f002 2353 return;
582511be
PA
2354 }
2355 else
2356 {
65706a29 2357 delete_lwp (child);
1a48f002 2358 return;
582511be
PA
2359 }
2360 }
2361
2362 gdb_assert (WIFSTOPPED (wstat));
2363
fa96cb38
PA
2364 if (WIFSTOPPED (wstat))
2365 {
2366 struct process_info *proc;
2367
c06cbd92 2368 /* Architecture-specific setup after inferior is running. */
fa96cb38 2369 proc = find_process_pid (pid_of (thread));
c06cbd92 2370 if (proc->tdesc == NULL)
fa96cb38 2371 {
c06cbd92
YQ
2372 if (proc->attached)
2373 {
c06cbd92
YQ
2374 /* This needs to happen after we have attached to the
2375 inferior and it is stopped for the first time, but
2376 before we access any inferior registers. */
797bcff5 2377 arch_setup_thread (thread);
c06cbd92
YQ
2378 }
2379 else
2380 {
2381 /* The process is started, but GDBserver will do
2382 architecture-specific setup after the program stops at
2383 the first instruction. */
2384 child->status_pending_p = 1;
2385 child->status_pending = wstat;
1a48f002 2386 return;
c06cbd92 2387 }
fa96cb38
PA
2388 }
2389 }
2390
fa96cb38
PA
2391 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2392 {
beed38b8 2393 struct process_info *proc = find_process_pid (pid_of (thread));
de0d863e 2394 int options = linux_low_ptrace_options (proc->attached);
beed38b8 2395
de0d863e 2396 linux_enable_event_reporting (lwpid, options);
fa96cb38
PA
2397 child->must_set_ptrace_flags = 0;
2398 }
2399
82075af2
JS
2400 /* Always update syscall_state, even if it will be filtered later. */
2401 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2402 {
2403 child->syscall_state
2404 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2405 ? TARGET_WAITKIND_SYSCALL_RETURN
2406 : TARGET_WAITKIND_SYSCALL_ENTRY);
2407 }
2408 else
2409 {
2410 /* Almost all other ptrace-stops are known to be outside of system
2411 calls, with further exceptions in handle_extended_wait. */
2412 child->syscall_state = TARGET_WAITKIND_IGNORE;
2413 }
2414
e7ad2f14
PA
2415 /* Be careful to not overwrite stop_pc until save_stop_reason is
2416 called. */
fa96cb38 2417 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
89a5711c 2418 && linux_is_extended_waitstatus (wstat))
fa96cb38 2419 {
582511be 2420 child->stop_pc = get_pc (child);
94585166 2421 if (handle_extended_wait (&child, wstat))
de0d863e
DB
2422 {
2423 /* The event has been handled, so just return without
2424 reporting it. */
1a48f002 2425 return;
de0d863e 2426 }
fa96cb38
PA
2427 }
2428
80aea927 2429 if (linux_wstatus_maybe_breakpoint (wstat))
582511be 2430 {
e7ad2f14 2431 if (save_stop_reason (child))
582511be
PA
2432 have_stop_pc = 1;
2433 }
2434
2435 if (!have_stop_pc)
2436 child->stop_pc = get_pc (child);
2437
fa96cb38
PA
2438 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2439 && child->stop_expected)
2440 {
c058728c
SM
2441 threads_debug_printf ("Expected stop.");
2442
fa96cb38
PA
2443 child->stop_expected = 0;
2444
2445 if (thread->last_resume_kind == resume_stop)
2446 {
2447 /* We want to report the stop to the core. Treat the
2448 SIGSTOP as a normal event. */
c058728c
SM
2449 threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
2450 target_pid_to_str (ptid_of (thread)).c_str ());
fa96cb38
PA
2451 }
2452 else if (stopping_threads != NOT_STOPPING_THREADS)
2453 {
2454 /* Stopping threads. We don't want this SIGSTOP to end up
582511be 2455 pending. */
c058728c
SM
2456 threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
2457 target_pid_to_str (ptid_of (thread)).c_str ());
1a48f002 2458 return;
fa96cb38
PA
2459 }
2460 else
2461 {
2bf6fb9d 2462 /* This is a delayed SIGSTOP. Filter out the event. */
c058728c 2463 threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
2bf6fb9d 2464 child->stepping ? "step" : "continue",
61d7f128 2465 target_pid_to_str (ptid_of (thread)).c_str ());
2bf6fb9d 2466
df95181f 2467 resume_one_lwp (child, child->stepping, 0, NULL);
1a48f002 2468 return;
fa96cb38
PA
2469 }
2470 }
2471
582511be
PA
2472 child->status_pending_p = 1;
2473 child->status_pending = wstat;
1a48f002 2474 return;
fa96cb38
PA
2475}
2476
b31cdfa6
TBA
2477bool
2478linux_process_target::maybe_hw_step (thread_info *thread)
f79b145d 2479{
b31cdfa6
TBA
2480 if (supports_hardware_single_step ())
2481 return true;
f79b145d
YQ
2482 else
2483 {
3b9a79ef 2484 /* GDBserver must insert single-step breakpoint for software
f79b145d 2485 single step. */
3b9a79ef 2486 gdb_assert (has_single_step_breakpoints (thread));
b31cdfa6 2487 return false;
f79b145d
YQ
2488 }
2489}
2490
df95181f
TBA
2491void
2492linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
20ba1ce6 2493{
20ba1ce6
PA
2494 struct lwp_info *lp = get_thread_lwp (thread);
2495
2496 if (lp->stopped
863d01bd 2497 && !lp->suspended
20ba1ce6 2498 && !lp->status_pending_p
183be222 2499 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
20ba1ce6 2500 {
8901d193
YQ
2501 int step = 0;
2502
2503 if (thread->last_resume_kind == resume_step)
b6d8d612
KB
2504 {
2505 if (supports_software_single_step ())
2506 install_software_single_step_breakpoints (lp);
2507
2508 step = maybe_hw_step (thread);
2509 }
20ba1ce6 2510
c058728c
SM
2511 threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
2512 target_pid_to_str (ptid_of (thread)).c_str (),
2513 paddress (lp->stop_pc), step);
20ba1ce6 2514
df95181f 2515 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
20ba1ce6
PA
2516 }
2517}
2518
d16f3f6c
TBA
/* Wait for an event from any LWP whose id matches FILTER_PTID
   (minus_one_ptid or a pid-only ptid selects randomly among all LWPs
   with pending events; otherwise the specific LWP is waited for).
   WAIT_PTID is only used to decide whether any not-stopped LWPs
   remain to wait for.  The raw wait status is stored in *WSTATP, and
   OPTIONS is passed through to waitpid.  Returns the lwpid of the
   event LWP (and makes its thread current), 0 if WNOHANG was set and
   no event was found, or -1 if there are no unwaited-for resumed
   LWPs.  */

int
linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
					       ptid_t filter_ptid,
					       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
    {
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  threads_debug_printf ("Got a pending child %ld", lwpid_of (event_thread));
	}
    }
  else if (filter_ptid != null_ptid)
    {
      requested_child = find_lwp_pid (filter_ptid);
      gdb_assert (requested_child != nullptr);

      /* A pending status during a fast tracepoint collect must be
	 deferred and the LWP resumed so it can exit the jump pad.  */
      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && (requested_child->collecting_fast_tracepoint
	      != fast_tpoint_collect_result::not_collecting))
	{
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error ("requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      threads_debug_printf ("Got an event from pending child %ld (%04x)",
			    lwpid_of (event_thread),
			    event_child->status_pending);

      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      switch_to_thread (event_thread);
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
			    ret, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  threads_debug_printf ("waitpid %ld received %s",
				(long) ret, status_to_str (*wstatp).c_str ());

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_thread ([this] (thread_info *thread)
	  {
	    resume_stopped_resumed_lwps (thread);
	  });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      if (check_zombie_leaders ())
	goto retry;

      auto not_stopped = [&] (thread_info *thread)
	{
	  return not_stopped_callback (thread, wait_ptid);
	};

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if (find_thread (not_stopped) == NULL)
	{
	  threads_debug_printf ("exit (no unwaited-for LWP)");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  threads_debug_printf ("WNOHANG set, no event found");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      threads_debug_printf ("sigsuspend'ing");

      sigsuspend (&prev_mask);
      gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);

  switch_to_thread (event_thread);

  return lwpid_of (event_thread);
}
2717
d16f3f6c
TBA
/* Wait for an event from any LWP matching PTID.  Convenience wrapper
   around wait_for_event_filtered, using PTID both as the pending
   status filter and as the "any resumed LWPs left?" filter.  */

int
linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  return wait_for_event_filtered (ptid, ptid, wstatp, options);
}
2723
6bf5e0ba
PA
/* Select one LWP out of those that have events pending.  On return,
   *ORIG_LP points at the chosen LWP; it is left unchanged if no LWP
   has a pending event.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  struct thread_info *event_thread = NULL;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it's the LWP that
     the core is most interested in.  If we didn't do this, then we'd
     have to handle pending step SIGTRAPs somehow in case the core
     later continues the previously-stepped thread, otherwise we'd
     report the pending SIGTRAP, and the core, not having stepped the
     thread, wouldn't understand what the trap was for, and therefore
     would report it to the user as a random signal.  */
  if (!non_stop)
    {
      event_thread = find_thread ([] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
		  && thread->last_resume_kind == resume_step
		  && lp->status_pending_p);
	});

      if (event_thread != NULL)
	threads_debug_printf
	  ("Select single-step %s",
	   target_pid_to_str (ptid_of (event_thread)).c_str ());
    }
  if (event_thread == NULL)
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had events.  */

      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  /* Only resumed LWPs that have an event pending.  */
	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
		  && lp->status_pending_p);
	});
    }

  if (event_thread != NULL)
    {
      struct lwp_info *event_lp = get_thread_lwp (event_thread);

      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
2778
7984d532
PA
2779/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2780 NULL. */
2781
2782static void
2783unsuspend_all_lwps (struct lwp_info *except)
2784{
139720c5
SM
2785 for_each_thread ([&] (thread_info *thread)
2786 {
2787 lwp_info *lwp = get_thread_lwp (thread);
2788
2789 if (lwp != except)
2790 lwp_suspended_decr (lwp);
2791 });
7984d532
PA
2792}
2793
5a6b0a41 2794static bool lwp_running (thread_info *thread);
fa593d66
PA
2795
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

   - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

   - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

void
linux_process_target::stabilize_threads ()
{
  /* If any thread is already stuck in a jump pad, we cannot
     stabilize; bail out early.  */
  thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
			       {
				 return stuck_in_jump_pad (thread);
			       });

  if (thread_stuck != NULL)
    {
      threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
			    lwpid_of (thread_stuck));
      return;
    }

  /* wait_1 below switches the current thread; restore it on exit.  */
  scoped_restore_current_thread restore_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread ([this] (thread_info *thread)
    {
      move_out_of_jump_pad (thread);
    });

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait event loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  (Suspend the LWP so it is not resumed until we
	     are done; undone by unsuspend_all_lwps below.)  */
	  lwp_suspended_inc (lwp);

	  /* Defer any interesting signal so it is re-reported once
	     the thread is out of the jump pad.  */
	  if (ourstatus.sig () != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  /* Sanity check: with all threads stopped, none should remain stuck
     in a jump pad.  Only worth the scan when debugging is on.  */
  if (debug_threads)
    {
      thread_stuck = find_thread ([this] (thread_info *thread)
		       {
			 return stuck_in_jump_pad (thread);
		       });

      if (thread_stuck != NULL)
	threads_debug_printf
	  ("couldn't stabilize, LWP %ld got stuck in jump pad",
	   lwpid_of (thread_stuck));
    }
}
2896
582511be
PA
2897/* Convenience function that is called when the kernel reports an
2898 event that is not passed out to GDB. */
2899
2900static ptid_t
2901ignore_event (struct target_waitstatus *ourstatus)
2902{
2903 /* If we got an event, there may still be others, as a single
2904 SIGCHLD can indicate more than one child stopped. This forces
2905 another target_wait call. */
2906 async_file_mark ();
2907
183be222 2908 ourstatus->set_ignore ();
582511be
PA
2909 return null_ptid;
2910}
2911
fd000fb3
TBA
/* Filter an exit-like event for EVENT_CHILD before it is reported.
   Non-leader thread exits are either converted to
   TARGET_WAITKIND_THREAD_EXITED or suppressed (set to ignore),
   depending on whether the client asked for thread-exit events, and
   the LWP is deleted.  Returns the ptid the (possibly rewritten)
   OURSTATUS should be reported for.  */

ptid_t
linux_process_target::filter_exit_event (lwp_info *event_child,
					 target_waitstatus *ourstatus)
{
  struct thread_info *thread = get_lwp_thread (event_child);
  ptid_t ptid = ptid_of (thread);

  if (ourstatus->kind () == TARGET_WAITKIND_THREAD_EXITED)
    {
      /* We're reporting a thread exit for the leader.  The exit was
	 detected by check_zombie_leaders.  */
      gdb_assert (is_leader (thread));
      gdb_assert (report_exit_events_for (thread));

      delete_lwp (event_child);
      return ptid;
    }

  /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
     if a non-leader thread exits with a signal, we'd report it to the
     core which would interpret it as the whole-process exiting.
     There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind.  */
  if (ourstatus->kind () != TARGET_WAITKIND_EXITED
      && ourstatus->kind () != TARGET_WAITKIND_SIGNALLED)
    return ptid;

  if (!is_leader (thread))
    {
      /* Non-leader exit: report as thread exit if requested,
	 otherwise swallow the event entirely.  */
      if (report_exit_events_for (thread))
	ourstatus->set_thread_exited (0);
      else
	ourstatus->set_ignore ();

      delete_lwp (event_child);
    }
  return ptid;
}
2949
82075af2
JS
2950/* Returns 1 if GDB is interested in any event_child syscalls. */
2951
2952static int
2953gdb_catching_syscalls_p (struct lwp_info *event_child)
2954{
2955 struct thread_info *thread = get_lwp_thread (event_child);
2956 struct process_info *proc = get_thread_process (thread);
2957
f27866ba 2958 return !proc->syscalls_to_catch.empty ();
82075af2
JS
2959}
2960
9eedd27d
TBA
2961bool
2962linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
82075af2 2963{
4cc32bec 2964 int sysno;
82075af2
JS
2965 struct thread_info *thread = get_lwp_thread (event_child);
2966 struct process_info *proc = get_thread_process (thread);
2967
f27866ba 2968 if (proc->syscalls_to_catch.empty ())
9eedd27d 2969 return false;
82075af2 2970
f27866ba 2971 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
9eedd27d 2972 return true;
82075af2 2973
4cc32bec 2974 get_syscall_trapinfo (event_child, &sysno);
f27866ba
SM
2975
2976 for (int iter : proc->syscalls_to_catch)
82075af2 2977 if (iter == sysno)
9eedd27d 2978 return true;
82075af2 2979
9eedd27d 2980 return false;
82075af2
JS
2981}
2982
d16f3f6c
TBA
/* Do the work of the wait target op.  Wait for an event from any LWP
   matching PTID, decide whether it should be reported to GDB, and if
   so fill in OURSTATUS and return the ptid to report it for; if not,
   resume the inferior and keep waiting (or, with TARGET_WNOHANG,
   return with OURSTATUS set to ignore).  */

ptid_t
linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
			      target_wait_flags target_options)
{
  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  client_state &cs = get_client_state ();
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;
  int in_step_range;

  threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

  bp_explains_trap = 0;
  trace_event = 0;
  in_step_range = 0;
  ourstatus->set_ignore ();

  /* Sampled before waiting: used below to distinguish "no event yet"
     from "no resumed children left".  */
  bool was_any_resumed = any_resumed ();

  if (step_over_bkpt == null_ptid)
    pid = wait_for_event (ptid, &w, options);
  else
    {
      /* A step-over is in progress: wait only for the stepping LWP,
	 and block until it reports.  */
      threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
			    target_pid_to_str (step_over_bkpt).c_str ());
      pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0 || (pid == -1 && !was_any_resumed))
    {
      gdb_assert (target_options & TARGET_WNOHANG);

      threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");

      ourstatus->set_ignore ();
      return null_ptid;
    }
  else if (pid == -1)
    {
      threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");

      ourstatus->set_no_resumed ();
      return null_ptid;
    }

  event_child = get_thread_lwp (current_thread);

  /* wait_for_event only returns an exit status for the last
     child of a process.  Report it.  */
  if (WIFEXITED (w) || WIFSIGNALED (w))
    {
      if (WIFEXITED (w))
	{
	  /* If we already have the exit recorded in waitstatus, use
	     it.  This will happen when we detect a zombie leader,
	     when we had GDB_THREAD_OPTION_EXIT enabled for it.  We
	     want to report its exit as TARGET_WAITKIND_THREAD_EXITED,
	     as the whole process hasn't exited yet.  */
	  const target_waitstatus &ws = event_child->waitstatus;
	  if (ws.kind () != TARGET_WAITKIND_IGNORE)
	    {
	      gdb_assert (ws.kind () == TARGET_WAITKIND_EXITED
			  || ws.kind () == TARGET_WAITKIND_THREAD_EXITED);
	      *ourstatus = ws;
	    }
	  else
	    ourstatus->set_exited (WEXITSTATUS (w));

	  threads_debug_printf
	    ("ret = %s, exited with retcode %d",
	     target_pid_to_str (ptid_of (current_thread)).c_str (),
	     WEXITSTATUS (w));
	}
      else
	{
	  ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));

	  threads_debug_printf
	    ("ret = %s, terminated with signal %d",
	     target_pid_to_str (ptid_of (current_thread)).c_str (),
	     WTERMSIG (w));
	}

      return filter_exit_event (event_child, ourstatus);
    }

  /* If step-over executes a breakpoint instruction, in the case of a
     hardware single step it means a gdb/gdbserver breakpoint had been
     planted on top of a permanent breakpoint, in the case of a software
     single step it may just mean that gdbserver hit the reinsert breakpoint.
     The PC has been adjusted by save_stop_reason to point at
     the breakpoint address.
     So in the case of the hardware single step advance the PC manually
     past the breakpoint and in the case of software single step advance only
     if it's not the single_step_breakpoint we are hitting.
     This avoids that a program would keep trapping a permanent breakpoint
     forever.  */
  if (step_over_bkpt != null_ptid
      && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && (event_child->stepping
	  || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
    {
      int increment_pc = 0;
      int breakpoint_kind = 0;
      CORE_ADDR stop_pc = event_child->stop_pc;

      breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
      sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);

      threads_debug_printf
	("step-over for %s executed software breakpoint",
	 target_pid_to_str (ptid_of (current_thread)).c_str ());

      if (increment_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);

	  event_child->stop_pc += increment_pc;
	  low_set_pc (regcache, event_child->stop_pc);

	  if (!low_breakpoint_at (event_child->stop_pc))
	    event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (low_supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && low_breakpoint_at (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any single-step
	 breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	threads_debug_printf ("Hit a gdbserver breakpoint.");
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* If we're collecting a fast tracepoint, finish the collection and
     move out of the jump pad before delivering a signal.  See
     linux_stabilize_threads.  */

  if (WIFSTOPPED (w)
      && WSTOPSIG (w) != SIGTRAP
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      threads_debug_printf ("Got signal %d for LWP %ld.  Check if we need "
			    "to defer or adjust it.",
			    WSTOPSIG (w), lwpid_of (current_thread));

      /* Allow debugging the jump pad itself.  */
      if (current_thread->last_resume_kind != resume_step
	  && maybe_move_out_of_jump_pad (event_child, &w))
	{
	  enqueue_one_deferred_signal (event_child, &w);

	  threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
				WSTOPSIG (w), lwpid_of (current_thread));

	  resume_one_lwp (event_child, 0, 0, NULL);

	  return ignore_event (ourstatus);
	}
    }

  if (event_child->collecting_fast_tracepoint
      != fast_tpoint_collect_result::not_collecting)
    {
      threads_debug_printf
	("LWP %ld was trying to move out of the jump pad (%d).  "
	 "Check if we're already there.",
	 lwpid_of (current_thread),
	 (int) event_child->collecting_fast_tracepoint);

      trace_event = 1;

      event_child->collecting_fast_tracepoint
	= linux_fast_tracepoint_collecting (event_child, NULL);

      if (event_child->collecting_fast_tracepoint
	  != fast_tpoint_collect_result::before_insn)
	{
	  /* No longer need this breakpoint.  */
	  if (event_child->exit_jump_pad_bkpt != NULL)
	    {
	      threads_debug_printf
		("No longer need exit-jump-pad bkpt; removing it."
		 "stopping all threads momentarily.");

	      /* Other running threads could hit this breakpoint.
		 We don't handle moribund locations like GDB does,
		 instead we always pause all threads when removing
		 breakpoints, so that any step-over or
		 decr_pc_after_break adjustment is always taken
		 care of while the breakpoint is still
		 inserted.  */
	      stop_all_lwps (1, event_child);

	      delete_breakpoint (event_child->exit_jump_pad_bkpt);
	      event_child->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, event_child);

	      gdb_assert (event_child->suspended >= 0);
	    }
	}

      if (event_child->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting)
	{
	  threads_debug_printf
	    ("fast tracepoint finished collecting successfully.");

	  /* We may have a deferred signal to report.  */
	  if (dequeue_one_deferred_signal (event_child, &w))
	    threads_debug_printf ("dequeued one signal.");
	  else
	    {
	      threads_debug_printf ("no deferred signals.");

	      if (stabilizing_threads)
		{
		  ourstatus->set_stopped (GDB_SIGNAL_0);

		  threads_debug_printf
		    ("ret = %s, stopped while stabilizing threads",
		     target_pid_to_str (ptid_of (current_thread)).c_str ());

		  return ptid_of (current_thread);
		}
	    }
	}
    }

  /* Check whether GDB would be interested in this event.  */

  /* Check if GDB is interested in this syscall.  */
  if (WIFSTOPPED (w)
      && WSTOPSIG (w) == SYSCALL_SIGTRAP
      && !gdb_catch_this_syscall (event_child))
    {
      threads_debug_printf ("Ignored syscall for LWP %ld.",
			    lwpid_of (current_thread));

      resume_one_lwp (event_child, event_child->stepping, 0, NULL);

      return ignore_event (ourstatus);
    }

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler.  Also never ignore signals that could be caused by a
     breakpoint.  */
  if (WIFSTOPPED (w)
      && current_thread->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
	  (current_process ()->priv->thread_db != NULL
	   && (WSTOPSIG (w) == __SIGRTMIN
	       || WSTOPSIG (w) == __SIGRTMIN + 1))
	  ||
#endif
	  (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
	   && !(WSTOPSIG (w) == SIGSTOP
		&& current_thread->last_resume_kind == resume_stop)
	   && !linux_wstatus_maybe_breakpoint (w))))
    {
      siginfo_t info, *info_p;

      threads_debug_printf ("Ignored signal %d for LWP %ld.",
			    WSTOPSIG (w), lwpid_of (current_thread));

      /* Fetch the siginfo so it can be re-delivered with the signal;
	 pass NULL if the kernel won't give it to us.  */
      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
		  (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      if (step_over_finished)
	{
	  /* We cancelled this thread's step-over above.  We still
	     need to unsuspend all other LWPs, and set them back
	     running again while the signal handler runs.  */
	  unsuspend_all_lwps (event_child);

	  /* Enqueue the pending signal info so that proceed_all_lwps
	     doesn't lose it.  */
	  enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);

	  proceed_all_lwps ();
	}
      else
	{
	  resume_one_lwp (event_child, event_child->stepping,
			  WSTOPSIG (w), info_p);
	}

      return ignore_event (ourstatus);
    }

  /* Note that all addresses are always "out of the step range" when
     there's no range to begin with.  */
  in_step_range = lwp_in_step_range (event_child);

  /* If GDB wanted this thread to single step, and the thread is out
     of the step range, we always want to report the SIGTRAP, and let
     GDB handle it.  Watchpoints should always be reported.  So should
     signals we can't explain.  A SIGTRAP we can't explain could be a
     GDB breakpoint --- we may or not support Z0 breakpoints.  If we
     do, we're be able to handle GDB breakpoints on top of internal
     breakpoints, by handling the internal breakpoint and still
     reporting the event to GDB.  If we don't, we're out of luck, GDB
     won't see the breakpoint hit.  If we see a single-step event but
     the thread should be continuing, don't pass the trap to gdb.
     That indicates that we had previously finished a single-step but
     left the single-step pending -- see
     complete_ongoing_step_over.  */
  report_to_gdb = (!maybe_internal_trap
		   || (current_thread->last_resume_kind == resume_step
		       && !in_step_range)
		   || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
		   || (!in_step_range
		       && !bp_explains_trap
		       && !trace_event
		       && !step_over_finished
		       && !(current_thread->last_resume_kind == resume_continue
			    && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
		   || (gdb_breakpoint_here (event_child->stop_pc)
		       && gdb_condition_true_at_breakpoint (event_child->stop_pc)
		       && gdb_no_commands_at_breakpoint (event_child->stop_pc))
		   || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);

  run_breakpoint_commands (event_child->stop_pc);

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (bp_explains_trap)
	threads_debug_printf ("Hit a gdbserver breakpoint.");

      if (step_over_finished)
	threads_debug_printf ("Step-over finished.");

      if (trace_event)
	threads_debug_printf ("Tracepoint event.");

      if (lwp_in_step_range (event_child))
	threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
			      paddress (event_child->stop_pc),
			      paddress (event_child->step_range_start),
			      paddress (event_child->step_range_end));

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (low_supports_breakpoints ())
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc);
	}

      if (step_over_finished)
	{
	  /* If we have finished stepping over a breakpoint, we've
	     stopped and suspended all LWPs momentarily except the
	     stepping one.  This is where we resume them all again.
	     We're going to keep waiting, so use proceed, which
	     handles stepping over the next breakpoint.  */
	  unsuspend_all_lwps (event_child);
	}
      else
	{
	  /* Remove the single-step breakpoints if any.  Note that
	     there isn't single-step breakpoint if we finished stepping
	     over.  */
	  if (supports_software_single_step ()
	      && has_single_step_breakpoints (current_thread))
	    {
	      stop_all_lwps (0, event_child);
	      delete_single_step_breakpoints (current_thread);
	      unstop_all_lwps (0, event_child);
	    }
	}

      threads_debug_printf ("proceeding all threads.");

      proceed_all_lwps ();

      return ignore_event (ourstatus);
    }

  if (debug_threads)
    {
      if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
	threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
			      lwpid_of (get_lwp_thread (event_child)),
			      event_child->waitstatus.to_string ().c_str ());

      if (current_thread->last_resume_kind == resume_step)
	{
	  if (event_child->step_range_start == event_child->step_range_end)
	    threads_debug_printf
	      ("GDB wanted to single-step, reporting event.");
	  else if (!lwp_in_step_range (event_child))
	    threads_debug_printf ("Out of step range, reporting event.");
	}

      if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
	threads_debug_printf ("Stopped by watchpoint.");
      else if (gdb_breakpoint_here (event_child->stop_pc))
	threads_debug_printf ("Stopped by GDB breakpoint.");
    }

  threads_debug_printf ("Hit a non-gdbserver trap event.");

  /* Alright, we're going to report a stop.  */

  /* Remove single-step breakpoints.  */
  if (supports_software_single_step ())
    {
      /* Remove single-step breakpoints or not.  If it is true, stop all
	 lwps, so that other threads won't hit the breakpoint in the
	 staled memory.  */
      int remove_single_step_breakpoints_p = 0;

      if (non_stop)
	{
	  remove_single_step_breakpoints_p
	    = has_single_step_breakpoints (current_thread);
	}
      else
	{
	  /* In all-stop, a stop reply cancels all previous resume
	     requests.  Delete all single-step breakpoints.  */

	  find_thread ([&] (thread_info *thread) {
	    if (has_single_step_breakpoints (thread))
	      {
		remove_single_step_breakpoints_p = 1;
		return true;
	      }

	    return false;
	  });
	}

      if (remove_single_step_breakpoints_p)
	{
	  /* If we remove single-step breakpoints from memory, stop all lwps,
	     so that other threads won't hit the breakpoint in the staled
	     memory.  */
	  stop_all_lwps (0, event_child);

	  if (non_stop)
	    {
	      gdb_assert (has_single_step_breakpoints (current_thread));
	      delete_single_step_breakpoints (current_thread);
	    }
	  else
	    {
	      for_each_thread ([] (thread_info *thread){
		if (has_single_step_breakpoints (thread))
		  delete_single_step_breakpoints (thread);
	      });
	    }

	  unstop_all_lwps (0, event_child);
	}
    }

  if (!stabilizing_threads)
    {
      /* In all-stop, stop all threads.  */
      if (!non_stop)
	stop_all_lwps (0, NULL);

      if (step_over_finished)
	{
	  if (!non_stop)
	    {
	      /* If we were doing a step-over, all other threads but
		 the stepping one had been paused in start_step_over,
		 with their suspend counts incremented.  We don't want
		 to do a full unstop/unpause, because we're in
		 all-stop mode (so we want threads stopped), but we
		 still need to unsuspend the other threads, to
		 decrement their `suspended' count back.  */
	      unsuspend_all_lwps (event_child);
	    }
	  else
	    {
	      /* If we just finished a step-over, then all threads had
		 been momentarily paused.  In all-stop, that's fine,
		 we want threads stopped by now anyway.  In non-stop,
		 we need to re-resume threads that GDB wanted to be
		 running.  */
	      unstop_all_lwps (1, event_child);
	    }
	}

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid == minus_one_ptid)
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  /* current_thread and event_child must stay in sync.  */
	  switch_to_thread (get_lwp_thread (event_child));

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}


      /* Stabilize threads (move out of jump pads).  */
      if (!non_stop)
	target_stabilize_threads ();
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (1, event_child);
    }

  /* At this point, we haven't set OURSTATUS.  This is where we do it.  */
  gdb_assert (ourstatus->kind () == TARGET_WAITKIND_IGNORE);

  if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    {
      /* If the reported event is an exit, fork, vfork, clone or exec,
	 let GDB know.  */

      /* Break the unreported fork/vfork/clone relationship chain.  */
      if (is_new_child_status (event_child->waitstatus.kind ()))
	{
	  event_child->relative->relative = NULL;
	  event_child->relative = NULL;
	}

      *ourstatus = event_child->waitstatus;
      /* Clear the event lwp's waitstatus since we handled it already.  */
      event_child->waitstatus.set_ignore ();
    }
  else
    {
      /* The LWP stopped due to a plain signal or a syscall signal.  Either way,
	 event_child->waitstatus wasn't filled in with the details, so look at
	 the wait status W.  */
      if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
	{
	  int syscall_number;

	  get_syscall_trapinfo (event_child, &syscall_number);
	  if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
	    ourstatus->set_syscall_entry (syscall_number);
	  else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
	    ourstatus->set_syscall_return (syscall_number);
	  else
	    gdb_assert_not_reached ("unexpected syscall state");
	}
      else if (current_thread->last_resume_kind == resume_stop
	       && WSTOPSIG (w) == SIGSTOP)
	{
	  /* A thread that has been requested to stop by GDB with vCont;t,
	     and it stopped cleanly, so report as SIG0.  The use of
	     SIGSTOP is an implementation detail.  */
	  ourstatus->set_stopped (GDB_SIGNAL_0);
	}
      else
	ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
    }

  /* Now that we've selected our final event LWP, un-adjust its PC if
     it was a software breakpoint, and the client doesn't know we can
     adjust the breakpoint ourselves.  */
  if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && !cs.swbreak_feature)
    {
      int decr_pc = low_decr_pc_after_break ();

      if (decr_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc + decr_pc);
	}
    }

  gdb_assert (step_over_bkpt == null_ptid);

  threads_debug_printf ("ret = %s, %s",
			target_pid_to_str (ptid_of (current_thread)).c_str (),
			ourstatus->to_string ().c_str ());

  return filter_exit_event (event_child, ourstatus);
}
3648
3649/* Get rid of any pending event in the pipe. */
3650static void
3651async_file_flush (void)
3652{
cdc8e9b2 3653 linux_event_pipe.flush ();
bd99dc85
PA
3654}
3655
3656/* Put something in the pipe, so the event loop wakes up. */
3657static void
3658async_file_mark (void)
3659{
cdc8e9b2 3660 linux_event_pipe.mark ();
bd99dc85
PA
3661}
3662
6532e7e3
TBA
3663ptid_t
3664linux_process_target::wait (ptid_t ptid,
3665 target_waitstatus *ourstatus,
b60cea74 3666 target_wait_flags target_options)
bd99dc85 3667{
95954743 3668 ptid_t event_ptid;
bd99dc85 3669
bd99dc85
PA
3670 /* Flush the async file first. */
3671 if (target_is_async_p ())
3672 async_file_flush ();
3673
582511be
PA
3674 do
3675 {
d16f3f6c 3676 event_ptid = wait_1 (ptid, ourstatus, target_options);
582511be
PA
3677 }
3678 while ((target_options & TARGET_WNOHANG) == 0
183be222 3679 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3680
3681 /* If at least one stop was reported, there may be more. A single
3682 SIGCHLD can signal more than one child stop. */
3683 if (target_is_async_p ()
3684 && (target_options & TARGET_WNOHANG) != 0
d7e15655 3685 && event_ptid != null_ptid)
bd99dc85
PA
3686 async_file_mark ();
3687
3688 return event_ptid;
da6d8c04
DJ
3689}
3690
c5f62d5f 3691/* Send a signal to an LWP. */
fd500816
DJ
3692
3693static int
a1928bad 3694kill_lwp (unsigned long lwpid, int signo)
fd500816 3695{
4a6ed09b 3696 int ret;
fd500816 3697
4a6ed09b
PA
3698 errno = 0;
3699 ret = syscall (__NR_tkill, lwpid, signo);
3700 if (errno == ENOSYS)
3701 {
3702 /* If tkill fails, then we are not using nptl threads, a
3703 configuration we no longer support. */
3704 perror_with_name (("tkill"));
3705 }
3706 return ret;
fd500816
DJ
3707}
3708
964e4306
PA
3709void
3710linux_stop_lwp (struct lwp_info *lwp)
3711{
3712 send_sigstop (lwp);
3713}
3714
0d62e5e8 3715static void
02fc4de7 3716send_sigstop (struct lwp_info *lwp)
0d62e5e8 3717{
bd99dc85 3718 int pid;
0d62e5e8 3719
d86d4aaf 3720 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3721
0d62e5e8
DJ
3722 /* If we already have a pending stop signal for this process, don't
3723 send another. */
54a0b537 3724 if (lwp->stop_expected)
0d62e5e8 3725 {
c058728c 3726 threads_debug_printf ("Have pending sigstop for lwp %d", pid);
ae13219e 3727
0d62e5e8
DJ
3728 return;
3729 }
3730
c058728c 3731 threads_debug_printf ("Sending sigstop to lwp %d", pid);
0d62e5e8 3732
d50171e4 3733 lwp->stop_expected = 1;
bd99dc85 3734 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3735}
3736
df3e4dbe
SM
3737static void
3738send_sigstop (thread_info *thread, lwp_info *except)
02fc4de7 3739{
d86d4aaf 3740 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3741
7984d532
PA
3742 /* Ignore EXCEPT. */
3743 if (lwp == except)
df3e4dbe 3744 return;
7984d532 3745
02fc4de7 3746 if (lwp->stopped)
df3e4dbe 3747 return;
02fc4de7
PA
3748
3749 send_sigstop (lwp);
7984d532
PA
3750}
3751
3752/* Increment the suspend count of an LWP, and stop it, if not stopped
3753 yet. */
df3e4dbe
SM
3754static void
3755suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
7984d532 3756{
d86d4aaf 3757 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3758
3759 /* Ignore EXCEPT. */
3760 if (lwp == except)
df3e4dbe 3761 return;
7984d532 3762
863d01bd 3763 lwp_suspended_inc (lwp);
7984d532 3764
df3e4dbe 3765 send_sigstop (thread, except);
02fc4de7
PA
3766}
3767
/* Mark LWP dead, with WSTAT as exit status pending to report later.
   If THREAD_EVENT is true, interpret WSTAT as a thread exit event
   instead of a process exit event.  This is meaningful for the leader
   thread, as we normally report a process-wide exit event when we see
   the leader exit, and a thread exit event when we see any other
   thread exit.  */

static void
mark_lwp_dead (struct lwp_info *lwp, int wstat, bool thread_event)
{
  /* Store the exit status for later.  */
  lwp->status_pending_p = 1;
  lwp->status_pending = wstat;

  /* Store in waitstatus as well, as there's nothing else to process
     for this event.  */
  if (WIFEXITED (wstat))
    {
      if (thread_event)
	lwp->waitstatus.set_thread_exited (WEXITSTATUS (wstat));
      else
	lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
    }
  else if (WIFSIGNALED (wstat))
    {
      /* A signal-termination is always reported process-wide, never
	 as a thread exit event.  */
      gdb_assert (!thread_event);
      lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
    }
  else
    gdb_assert_not_reached ("unknown status kind");

  /* Prevent trying to stop it.  */
  lwp->stopped = 1;

  /* No further stops are expected from a dead lwp.  */
  lwp->stop_expected = 0;
}
3805
00db26fa
PA
3806/* Return true if LWP has exited already, and has a pending exit event
3807 to report to GDB. */
3808
3809static int
3810lwp_is_marked_dead (struct lwp_info *lwp)
3811{
3812 return (lwp->status_pending_p
3813 && (WIFEXITED (lwp->status_pending)
3814 || WIFSIGNALED (lwp->status_pending)));
3815}
3816
/* Wait until every child has reported a stop for the SIGSTOPs we
   queued.  All events are left pending (the null_ptid filter below);
   we return once there are no unwaited-for children left.  */

void
linux_process_target::wait_for_sigstop ()
{
  struct thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  scoped_restore_current_thread restore_thread;

  threads_debug_printf ("pulling events");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
  gdb_assert (ret == -1);

  if (saved_thread == NULL || mythread_alive (saved_tid))
    return;
  else
    {
      threads_debug_printf ("Previously current thread died.");

      /* We can't change the current inferior behind GDB's back,
	 otherwise, a subsequent command may apply to the wrong
	 process.  */
      restore_thread.dont_restore ();
      switch_to_thread (nullptr);
    }
}
3854
/* Return true if THREAD is stopped inside the fast-tracepoint jump
   pad while still collecting, in a spot we must not leave it at
   (a GDB breakpoint, a watchpoint stop, or a single-step request).  */

bool
linux_process_target::stuck_in_jump_pad (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  if (lwp->suspended != 0)
    {
      internal_error ("LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* Allow debugging the jump pad, gdb_collect, etc..  */
  return (supports_fast_tracepoints ()
	  && agent_loaded_p ()
	  && (gdb_breakpoint_here (lwp->stop_pc)
	      || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
	      || thread->last_resume_kind == resume_step)
	  && (linux_fast_tracepoint_collecting (lwp, NULL)
	      != fast_tpoint_collect_result::not_collecting));
}
3876
/* If THREAD is stopped inside the fast-tracepoint jump pad and it is
   safe to do so, resume it so it finishes the collect and exits the
   pad; any pending signal is deferred for later.  Otherwise, leave it
   where it is and bump its suspend count.  */

void
linux_process_target::move_out_of_jump_pad (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int *wstat;

  if (lwp->suspended != 0)
    {
      internal_error ("LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* For gdb_breakpoint_here.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
			    lwpid_of (thread));

      if (wstat)
	{
	  /* Park the pending stop signal; it will be re-reported once
	     the LWP is out of the jump pad.  */
	  lwp->status_pending_p = 0;
	  enqueue_one_deferred_signal (lwp, wstat);

	  threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad",
				WSTOPSIG (*wstat), lwpid_of (thread));
	}

      resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    lwp_suspended_inc (lwp);
}
3919
5a6b0a41
SM
3920static bool
3921lwp_running (thread_info *thread)
fa593d66 3922{
d86d4aaf 3923 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3924
00db26fa 3925 if (lwp_is_marked_dead (lwp))
5a6b0a41
SM
3926 return false;
3927
3928 return !lwp->stopped;
fa593d66
PA
3929}
3930
/* Stop all LWPs except EXCEPT (additionally bumping each one's
   suspend count when SUSPEND is set), then wait until every one has
   reported its stop.  Sets the global stopping_threads state for the
   duration.  */

void
linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
{
  /* Should not be called recursively.  */
  gdb_assert (stopping_threads == NOT_STOPPING_THREADS);

  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  threads_debug_printf
    ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
     (except != NULL
      ? target_pid_to_str (ptid_of (get_lwp_thread (except))).c_str ()
      : "none"));

  stopping_threads = (suspend
		      ? STOPPING_AND_SUSPENDING_THREADS
		      : STOPPING_THREADS);

  if (suspend)
    for_each_thread ([&] (thread_info *thread)
      {
	suspend_and_send_sigstop (thread, except);
      });
  else
    for_each_thread ([&] (thread_info *thread)
      {
	send_sigstop (thread, except);
      });

  wait_for_sigstop ();
  stopping_threads = NOT_STOPPING_THREADS;

  threads_debug_printf ("setting stopping_threads back to !stopping");
}
3965
863d01bd
PA
3966/* Enqueue one signal in the chain of signals which need to be
3967 delivered to this process on next resume. */
3968
3969static void
3970enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3971{
013e3554
TBA
3972 lwp->pending_signals.emplace_back (signal);
3973 if (info == nullptr)
3974 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
863d01bd 3975 else
013e3554 3976 lwp->pending_signals.back ().info = *info;
863d01bd
PA
3977}
3978
/* Plant single-step breakpoints at every possible next PC of LWP, as
   computed by the low target's software single-step support.  */

void
linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct regcache *regcache = get_thread_regcache (thread, 1);

  scoped_restore_current_thread restore_thread;

  switch_to_thread (thread);
  std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);

  for (CORE_ADDR pc : next_pcs)
    set_single_step_breakpoint (pc, current_ptid);
}
3993
df95181f
TBA
3994int
3995linux_process_target::single_step (lwp_info* lwp)
7fe5e27e
AT
3996{
3997 int step = 0;
3998
b31cdfa6 3999 if (supports_hardware_single_step ())
7fe5e27e
AT
4000 {
4001 step = 1;
4002 }
7582c77c 4003 else if (supports_software_single_step ())
7fe5e27e
AT
4004 {
4005 install_software_single_step_breakpoints (lwp);
4006 step = 0;
4007 }
4008 else
c058728c 4009 threads_debug_printf ("stepping is not implemented on this target");
7fe5e27e
AT
4010
4011 return step;
4012}
4013
35ac8b3e 4014/* The signal can be delivered to the inferior if we are not trying to
5b061e98
YQ
4015 finish a fast tracepoint collect. Since signal can be delivered in
4016 the step-over, the program may go to signal handler and trap again
4017 after return from the signal handler. We can live with the spurious
4018 double traps. */
35ac8b3e
YQ
4019
4020static int
4021lwp_signal_can_be_delivered (struct lwp_info *lwp)
4022{
229d26fc
SM
4023 return (lwp->collecting_fast_tracepoint
4024 == fast_tpoint_collect_result::not_collecting);
35ac8b3e
YQ
4025}
4026
/* Resume LWP with signal SIGNAL (and siginfo INFO), single-stepping
   if STEP.  Errors from ptrace propagate as exceptions; see
   resume_one_lwp for the error-swallowing wrapper.  */

void
linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
					    int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int ptrace_request;
  struct process_info *proc = get_thread_process (thread);

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || !lwp->pending_signals.empty ()
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Not resuming lwp %ld (%s, stop %s); has pending status",
	 lwpid_of (thread), step ? "step" : "continue",
	 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      threads_debug_printf ("  pending reinsert at 0x%s",
			    paddress (lwp->bp_reinsert));

      if (supports_hardware_single_step ())
	{
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    threads_debug_printf
      ("lwp %ld wants to get out of fast tracepoint jump pad "
       "(exit-jump-pad-bkpt)", lwpid_of (thread));

  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      threads_debug_printf
	("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
	 lwpid_of (thread));

      if (supports_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error ("moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      threads_debug_printf
	("lwp %ld has a while-stepping action -> forcing step.",
	 lwpid_of (thread));

      step = single_step (lwp);
    }

  if (proc->tdesc != NULL && low_supports_breakpoints ())
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      lwp->stop_pc = low_get_pc (regcache);

      threads_debug_printf ("  %s from pc 0x%lx", step ? "step" : "continue",
			    (long) lwp->stop_pc);
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
    {
      const pending_signal &p_sig = lwp->pending_signals.front ();

      signal = p_sig.signal;
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals.pop_front ();
    }

  threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
			lwpid_of (thread), step ? "step" : "continue", signal,
			lwp->stop_expected ? "expected" : "not expected");

  low_prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  if (errno)
    {
      int saved_errno = errno;

      threads_debug_printf ("ptrace errno = %d (%s)",
			    saved_errno, strerror (saved_errno));

      errno = saved_errno;
      perror_with_name ("resuming thread");
    }

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
4222
d7599cc0
TBA
4223void
4224linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4225{
4226 /* Nop. */
4227}
4228
/* Called when we try to resume a stopped LWP and that errors out.  If
   the LWP is no longer in ptrace-stopped state (meaning it's zombie,
   or about to become), discard the error, clear any pending status
   the LWP may have, and return true (we'll collect the exit status
   soon enough).  Otherwise, return false.  */

static int
check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
{
  struct thread_info *thread = get_lwp_thread (lp);

  /* If we get an error after resuming the LWP successfully, we'd
     confuse !T state for the LWP being gone.  */
  gdb_assert (lp->stopped);

  /* We can't just check whether the LWP is in 'Z (Zombie)' state,
     because even if ptrace failed with ESRCH, the tracee may be "not
     yet fully dead", but already refusing ptrace requests.  In that
     case the tracee has 'R (Running)' state for a little bit
     (observed in Linux 3.18).  See also the note on ESRCH in the
     ptrace(2) man page.  Instead, check whether the LWP has any state
     other than ptrace-stopped.  */

  /* Don't assume anything if /proc/PID/status can't be read.  */
  if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
    {
      lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
      lp->status_pending_p = 0;
      return 1;
    }
  return 0;
}
4261
/* Like resume_one_lwp_throw, but swallows the error when the LWP has
   already vanished from under us (zombie / reaped), marking it
   resumed so its exit event can still be collected.  */

void
linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
				      siginfo_t *info)
{
  try
    {
      resume_one_lwp_throw (lwp, step, signal, info);
    }
  catch (const gdb_exception_error &ex)
    {
      if (check_ptrace_stopped_lwp_gone (lwp))
	{
	  /* This could be because we tried to resume an LWP after its leader
	     exited.  Mark it as resumed, so we can collect an exit event
	     from it.  */
	  lwp->stopped = 0;
	  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
      else
	throw;
    }
}
4284
/* This function is called once per thread via for_each_thread.
   We look up which resume request applies to THREAD and mark it with a
   pointer to the appropriate resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */

static void
linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  for (int ndx = 0; ndx < n; ndx++)
    {
      ptid_t ptid = resume[ndx].thread;
      if (ptid == minus_one_ptid
	  || ptid == thread->id
	  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
	     of PID'.  */
	  || (ptid.pid () == pid_of (thread)
	      && (ptid.is_pid ()
		  || ptid.lwp () == -1)))
	{
	  /* A stop request for a thread that is already (being)
	     stopped is a no-op.  */
	  if (resume[ndx].kind == resume_stop
	      && thread->last_resume_kind == resume_stop)
	    {
	      threads_debug_printf
		("already %s LWP %ld at GDB's request",
		 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
		  ? "stopped" : "stopping"),
		 lwpid_of (thread));

	      continue;
	    }

	  /* Ignore (wildcard) resume requests for already-resumed
	     threads.  */
	  if (resume[ndx].kind != resume_stop
	      && thread->last_resume_kind != resume_stop)
	    {
	      threads_debug_printf
		("already %s LWP %ld at GDB's request",
		 (thread->last_resume_kind == resume_step
		  ? "stepping" : "continuing"),
		 lwpid_of (thread));
	      continue;
	    }

	  /* Don't let wildcard resumes resume fork/vfork/clone
	     children that GDB does not yet know are new children.  */
	  if (lwp->relative != NULL)
	    {
	      struct lwp_info *rel = lwp->relative;

	      if (rel->status_pending_p
		  && is_new_child_status (rel->waitstatus.kind ()))
		{
		  threads_debug_printf
		    ("not resuming LWP %ld: has queued stop reply",
		     lwpid_of (thread));
		  continue;
		}
	    }

	  /* If the thread has a pending event that has already been
	     reported to GDBserver core, but GDB has not pulled the
	     event out of the vStopped queue yet, likewise, ignore the
	     (wildcard) resume request.  */
	  if (in_queued_stop_replies (thread->id))
	    {
	      threads_debug_printf
		("not resuming LWP %ld: has queued stop reply",
		 lwpid_of (thread));
	      continue;
	    }

	  lwp->resume = &resume[ndx];
	  thread->last_resume_kind = lwp->resume->kind;

	  lwp->step_range_start = lwp->resume->step_range_start;
	  lwp->step_range_end = lwp->resume->step_range_end;

	  /* If we had a deferred signal to report, dequeue one now.
	     This can happen if LWP gets more than one signal while
	     trying to get out of a jump pad.  */
	  if (lwp->stopped
	      && !lwp->status_pending_p
	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
	    {
	      lwp->status_pending_p = 1;

	      threads_debug_printf
		("Dequeueing deferred signal %d for LWP %ld, "
		 "leaving status pending.",
		 WSTOPSIG (lwp->status_pending),
		 lwpid_of (thread));
	    }

	  return;
	}
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;
}
4391
df95181f
TBA
4392bool
4393linux_process_target::resume_status_pending (thread_info *thread)
5544ad89 4394{
d86d4aaf 4395 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4396
bd99dc85
PA
4397 /* LWPs which will not be resumed are not interesting, because
4398 we might not wait for them next time through linux_wait. */
2bd7c093 4399 if (lwp->resume == NULL)
25c28b4d 4400 return false;
64386c31 4401
df95181f 4402 return thread_still_has_status_pending (thread);
d50171e4
PA
4403}
4404
/* Return true if THREAD is stopped at a breakpoint that we must step
   over before it can be resumed.  A series of early-outs filters
   threads that won't run, already have events, or whose PC moved.  */

bool
linux_process_target::thread_needs_step_over (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
			    lwpid_of (thread));
      return false;
    }

  if (thread->last_resume_kind == resume_stop)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, should remain stopped",
	 lwpid_of (thread));
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
			    lwpid_of (thread));
      return false;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending status.",
	 lwpid_of (thread));
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Cancelling, PC was changed. "
	 "Old stop_pc was 0x%s, PC is now 0x%s", lwpid_of (thread),
	 paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (supports_software_single_step ()
      && !lwp->pending_signals.empty ()
      && lwp_signal_can_be_delivered (lwp))
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending signals.",
	 lwpid_of (thread));

      return false;
    }

  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
				" GDB breakpoint at 0x%s; skipping step over",
				lwpid_of (thread), paddress (pc));

	  return false;
	}
      else
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, "
				"found breakpoint at 0x%s",
				lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  return true;
	}
    }

  threads_debug_printf
    ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
     lwpid_of (thread), paddress (pc));

  return false;
}
4520
/* Begin stepping LWP over the breakpoint at its current PC: stop all
   other threads, temporarily remove the breakpoint, single-step the
   LWP, and record in step_over_bkpt that we expect the next event
   from this LWP.  */

void
linux_process_target::start_step_over (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  CORE_ADDR pc;

  threads_debug_printf ("Starting step-over on LWP %ld. Stopping all threads",
			lwpid_of (thread));

  stop_all_lwps (1, lwp);

  if (lwp->suspended != 0)
    {
      internal_error ("LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  threads_debug_printf ("Done stopping all threads for step-over.");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  bool step = false;
  {
    scoped_restore_current_thread restore_thread;
    switch_to_thread (thread);

    lwp->bp_reinsert = pc;
    uninsert_breakpoints_at (pc);
    uninsert_fast_tracepoint_jumps_at (pc);

    step = single_step (lwp);
  }

  resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->id;
}
4563
b31cdfa6
TBA
4564bool
4565linux_process_target::finish_step_over (lwp_info *lwp)
d50171e4
PA
4566{
4567 if (lwp->bp_reinsert != 0)
4568 {
24583e45 4569 scoped_restore_current_thread restore_thread;
f79b145d 4570
c058728c 4571 threads_debug_printf ("Finished step over.");
d50171e4 4572
24583e45 4573 switch_to_thread (get_lwp_thread (lwp));
f79b145d 4574
d50171e4
PA
4575 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4576 may be no breakpoint to reinsert there by now. */
4577 reinsert_breakpoints_at (lwp->bp_reinsert);
fa593d66 4578 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
d50171e4
PA
4579
4580 lwp->bp_reinsert = 0;
4581
3b9a79ef
YQ
4582 /* Delete any single-step breakpoints. No longer needed. We
4583 don't have to worry about other threads hitting this trap,
4584 and later not being able to explain it, because we were
4585 stepping over a breakpoint, and we hold all threads but
4586 LWP stopped while doing that. */
b31cdfa6 4587 if (!supports_hardware_single_step ())
f79b145d 4588 {
3b9a79ef
YQ
4589 gdb_assert (has_single_step_breakpoints (current_thread));
4590 delete_single_step_breakpoints (current_thread);
f79b145d 4591 }
d50171e4
PA
4592
4593 step_over_bkpt = null_ptid;
b31cdfa6 4594 return true;
d50171e4
PA
4595 }
4596 else
b31cdfa6 4597 return false;
d50171e4
PA
4598}
4599
d16f3f6c
TBA
4600void
4601linux_process_target::complete_ongoing_step_over ()
863d01bd 4602{
d7e15655 4603 if (step_over_bkpt != null_ptid)
863d01bd
PA
4604 {
4605 struct lwp_info *lwp;
4606 int wstat;
4607 int ret;
4608
c058728c 4609 threads_debug_printf ("detach: step over in progress, finish it first");
863d01bd
PA
4610
4611 /* Passing NULL_PTID as filter indicates we want all events to
4612 be left pending. Eventually this returns when there are no
4613 unwaited-for children left. */
d16f3f6c
TBA
4614 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4615 __WALL);
863d01bd
PA
4616 gdb_assert (ret == -1);
4617
4618 lwp = find_lwp_pid (step_over_bkpt);
4619 if (lwp != NULL)
7e9cf1fe
PA
4620 {
4621 finish_step_over (lwp);
4622
4623 /* If we got our step SIGTRAP, don't leave it pending,
4624 otherwise we would report it to GDB as a spurious
4625 SIGTRAP. */
4626 gdb_assert (lwp->status_pending_p);
4627 if (WIFSTOPPED (lwp->status_pending)
4628 && WSTOPSIG (lwp->status_pending) == SIGTRAP)
4629 {
4630 thread_info *thread = get_lwp_thread (lwp);
4631 if (thread->last_resume_kind != resume_step)
4632 {
c058728c 4633 threads_debug_printf ("detach: discard step-over SIGTRAP");
7e9cf1fe
PA
4634
4635 lwp->status_pending_p = 0;
4636 lwp->status_pending = 0;
4637 resume_one_lwp (lwp, lwp->stepping, 0, NULL);
4638 }
4639 else
c058728c
SM
4640 threads_debug_printf
4641 ("detach: resume_step, not discarding step-over SIGTRAP");
7e9cf1fe
PA
4642 }
4643 }
863d01bd
PA
4644 step_over_bkpt = null_ptid;
4645 unsuspend_all_lwps (lwp);
4646 }
4647}
4648
df95181f
TBA
4649void
4650linux_process_target::resume_one_thread (thread_info *thread,
4651 bool leave_all_stopped)
5544ad89 4652{
d86d4aaf 4653 struct lwp_info *lwp = get_thread_lwp (thread);
d50171e4 4654 int leave_pending;
5544ad89 4655
2bd7c093 4656 if (lwp->resume == NULL)
c80825ff 4657 return;
5544ad89 4658
bd99dc85 4659 if (lwp->resume->kind == resume_stop)
5544ad89 4660 {
c058728c
SM
4661 threads_debug_printf ("resume_stop request for LWP %ld",
4662 lwpid_of (thread));
bd99dc85
PA
4663
4664 if (!lwp->stopped)
4665 {
c058728c 4666 threads_debug_printf ("stopping LWP %ld", lwpid_of (thread));
bd99dc85 4667
d50171e4
PA
4668 /* Stop the thread, and wait for the event asynchronously,
4669 through the event loop. */
02fc4de7 4670 send_sigstop (lwp);
bd99dc85
PA
4671 }
4672 else
4673 {
c058728c 4674 threads_debug_printf ("already stopped LWP %ld", lwpid_of (thread));
d50171e4
PA
4675
4676 /* The LWP may have been stopped in an internal event that
4677 was not meant to be notified back to GDB (e.g., gdbserver
4678 breakpoint), so we should be reporting a stop event in
4679 this case too. */
4680
4681 /* If the thread already has a pending SIGSTOP, this is a
4682 no-op. Otherwise, something later will presumably resume
4683 the thread and this will cause it to cancel any pending
4684 operation, due to last_resume_kind == resume_stop. If
4685 the thread already has a pending status to report, we
4686 will still report it the next time we wait - see
4687 status_pending_p_callback. */
1a981360
PA
4688
4689 /* If we already have a pending signal to report, then
4690 there's no need to queue a SIGSTOP, as this means we're
4691 midway through moving the LWP out of the jumppad, and we
4692 will report the pending signal as soon as that is
4693 finished. */
013e3554 4694 if (lwp->pending_signals_to_report.empty ())
1a981360 4695 send_sigstop (lwp);
bd99dc85 4696 }
32ca6d61 4697
bd99dc85
PA
4698 /* For stop requests, we're done. */
4699 lwp->resume = NULL;
183be222 4700 thread->last_status.set_ignore ();
c80825ff 4701 return;
5544ad89
DJ
4702 }
4703
bd99dc85 4704 /* If this thread which is about to be resumed has a pending status,
863d01bd
PA
4705 then don't resume it - we can just report the pending status.
4706 Likewise if it is suspended, because e.g., another thread is
4707 stepping past a breakpoint. Make sure to queue any signals that
4708 would otherwise be sent. In all-stop mode, we do this decision
4709 based on if *any* thread has a pending status. If there's a
4710 thread that needs the step-over-breakpoint dance, then don't
4711 resume any other thread but that particular one. */
4712 leave_pending = (lwp->suspended
4713 || lwp->status_pending_p
4714 || leave_all_stopped);
5544ad89 4715
0e9a339e
YQ
4716 /* If we have a new signal, enqueue the signal. */
4717 if (lwp->resume->sig != 0)
4718 {
4719 siginfo_t info, *info_p;
4720
4721 /* If this is the same signal we were previously stopped by,
4722 make sure to queue its siginfo. */
4723 if (WIFSTOPPED (lwp->last_status)
4724 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4725 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4726 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4727 info_p = &info;
4728 else
4729 info_p = NULL;
4730
4731 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4732 }
4733
d50171e4 4734 if (!leave_pending)
bd99dc85 4735 {
c058728c 4736 threads_debug_printf ("resuming LWP %ld", lwpid_of (thread));
5544ad89 4737
9c80ecd6 4738 proceed_one_lwp (thread, NULL);
bd99dc85
PA
4739 }
4740 else
c058728c 4741 threads_debug_printf ("leaving LWP %ld stopped", lwpid_of (thread));
5544ad89 4742
183be222 4743 thread->last_status.set_ignore ();
bd99dc85 4744 lwp->resume = NULL;
0d62e5e8
DJ
4745}
4746
0e4d7e35
TBA
4747void
4748linux_process_target::resume (thread_resume *resume_info, size_t n)
0d62e5e8 4749{
d86d4aaf 4750 struct thread_info *need_step_over = NULL;
c6ecbae5 4751
c058728c 4752 THREADS_SCOPED_DEBUG_ENTER_EXIT;
87ce2a04 4753
5fdda392
SM
4754 for_each_thread ([&] (thread_info *thread)
4755 {
4756 linux_set_resume_request (thread, resume_info, n);
4757 });
5544ad89 4758
d50171e4
PA
4759 /* If there is a thread which would otherwise be resumed, which has
4760 a pending status, then don't resume any threads - we can just
4761 report the pending status. Make sure to queue any signals that
4762 would otherwise be sent. In non-stop mode, we'll apply this
4763 logic to each thread individually. We consume all pending events
4764 before considering to start a step-over (in all-stop). */
25c28b4d 4765 bool any_pending = false;
bd99dc85 4766 if (!non_stop)
df95181f
TBA
4767 any_pending = find_thread ([this] (thread_info *thread)
4768 {
4769 return resume_status_pending (thread);
4770 }) != nullptr;
d50171e4
PA
4771
4772 /* If there is a thread which would otherwise be resumed, which is
4773 stopped at a breakpoint that needs stepping over, then don't
4774 resume any threads - have it step over the breakpoint with all
4775 other threads stopped, then resume all threads again. Make sure
4776 to queue any signals that would otherwise be delivered or
4777 queued. */
bf9ae9d8 4778 if (!any_pending && low_supports_breakpoints ())
df95181f
TBA
4779 need_step_over = find_thread ([this] (thread_info *thread)
4780 {
4781 return thread_needs_step_over (thread);
4782 });
d50171e4 4783
c80825ff 4784 bool leave_all_stopped = (need_step_over != NULL || any_pending);
d50171e4 4785
c058728c
SM
4786 if (need_step_over != NULL)
4787 threads_debug_printf ("Not resuming all, need step over");
4788 else if (any_pending)
4789 threads_debug_printf ("Not resuming, all-stop and found "
4790 "an LWP with pending status");
4791 else
4792 threads_debug_printf ("Resuming, no pending status or step over needed");
d50171e4
PA
4793
4794 /* Even if we're leaving threads stopped, queue all signals we'd
4795 otherwise deliver. */
c80825ff
SM
4796 for_each_thread ([&] (thread_info *thread)
4797 {
df95181f 4798 resume_one_thread (thread, leave_all_stopped);
c80825ff 4799 });
d50171e4
PA
4800
4801 if (need_step_over)
d86d4aaf 4802 start_step_over (get_thread_lwp (need_step_over));
87ce2a04 4803
1bebeeca
PA
4804 /* We may have events that were pending that can/should be sent to
4805 the client now. Trigger a linux_wait call. */
4806 if (target_is_async_p ())
4807 async_file_mark ();
d50171e4
PA
4808}
4809
df95181f
TBA
4810void
4811linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
d50171e4 4812{
d86d4aaf 4813 struct lwp_info *lwp = get_thread_lwp (thread);
d50171e4
PA
4814 int step;
4815
7984d532 4816 if (lwp == except)
e2b44075 4817 return;
d50171e4 4818
c058728c 4819 threads_debug_printf ("lwp %ld", lwpid_of (thread));
d50171e4
PA
4820
4821 if (!lwp->stopped)
4822 {
c058728c 4823 threads_debug_printf (" LWP %ld already running", lwpid_of (thread));
e2b44075 4824 return;
d50171e4
PA
4825 }
4826
02fc4de7 4827 if (thread->last_resume_kind == resume_stop
183be222 4828 && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
d50171e4 4829 {
c058728c
SM
4830 threads_debug_printf (" client wants LWP to remain %ld stopped",
4831 lwpid_of (thread));
e2b44075 4832 return;
d50171e4
PA
4833 }
4834
4835 if (lwp->status_pending_p)
4836 {
c058728c
SM
4837 threads_debug_printf (" LWP %ld has pending status, leaving stopped",
4838 lwpid_of (thread));
e2b44075 4839 return;
d50171e4
PA
4840 }
4841
7984d532
PA
4842 gdb_assert (lwp->suspended >= 0);
4843
d50171e4
PA
4844 if (lwp->suspended)
4845 {
c058728c 4846 threads_debug_printf (" LWP %ld is suspended", lwpid_of (thread));
e2b44075 4847 return;
d50171e4
PA
4848 }
4849
1a981360 4850 if (thread->last_resume_kind == resume_stop
013e3554 4851 && lwp->pending_signals_to_report.empty ()
229d26fc
SM
4852 && (lwp->collecting_fast_tracepoint
4853 == fast_tpoint_collect_result::not_collecting))
02fc4de7
PA
4854 {
4855 /* We haven't reported this LWP as stopped yet (otherwise, the
4856 last_status.kind check above would catch it, and we wouldn't
4857 reach here. This LWP may have been momentarily paused by a
4858 stop_all_lwps call while handling for example, another LWP's
4859 step-over. In that case, the pending expected SIGSTOP signal
4860 that was queued at vCont;t handling time will have already
4861 been consumed by wait_for_sigstop, and so we need to requeue
4862 another one here. Note that if the LWP already has a SIGSTOP
4863 pending, this is a no-op. */
4864
c058728c
SM
4865 threads_debug_printf
4866 ("Client wants LWP %ld to stop. Making sure it has a SIGSTOP pending",
4867 lwpid_of (thread));
02fc4de7
PA
4868
4869 send_sigstop (lwp);
4870 }
4871
863d01bd
PA
4872 if (thread->last_resume_kind == resume_step)
4873 {
c058728c
SM
4874 threads_debug_printf (" stepping LWP %ld, client wants it stepping",
4875 lwpid_of (thread));
8901d193 4876
3b9a79ef 4877 /* If resume_step is requested by GDB, install single-step
8901d193 4878 breakpoints when the thread is about to be actually resumed if
3b9a79ef 4879 the single-step breakpoints weren't removed. */
7582c77c 4880 if (supports_software_single_step ()
3b9a79ef 4881 && !has_single_step_breakpoints (thread))
8901d193
YQ
4882 install_software_single_step_breakpoints (lwp);
4883
4884 step = maybe_hw_step (thread);
863d01bd
PA
4885 }
4886 else if (lwp->bp_reinsert != 0)
4887 {
c058728c
SM
4888 threads_debug_printf (" stepping LWP %ld, reinsert set",
4889 lwpid_of (thread));
f79b145d
YQ
4890
4891 step = maybe_hw_step (thread);
863d01bd
PA
4892 }
4893 else
4894 step = 0;
4895
df95181f 4896 resume_one_lwp (lwp, step, 0, NULL);
7984d532
PA
4897}
4898
df95181f
TBA
4899void
4900linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4901 lwp_info *except)
7984d532 4902{
d86d4aaf 4903 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
4904
4905 if (lwp == except)
e2b44075 4906 return;
7984d532 4907
863d01bd 4908 lwp_suspended_decr (lwp);
7984d532 4909
e2b44075 4910 proceed_one_lwp (thread, except);
d50171e4
PA
4911}
4912
d16f3f6c
TBA
4913void
4914linux_process_target::proceed_all_lwps ()
d50171e4 4915{
d86d4aaf 4916 struct thread_info *need_step_over;
d50171e4
PA
4917
4918 /* If there is a thread which would otherwise be resumed, which is
4919 stopped at a breakpoint that needs stepping over, then don't
4920 resume any threads - have it step over the breakpoint with all
4921 other threads stopped, then resume all threads again. */
4922
bf9ae9d8 4923 if (low_supports_breakpoints ())
d50171e4 4924 {
df95181f
TBA
4925 need_step_over = find_thread ([this] (thread_info *thread)
4926 {
4927 return thread_needs_step_over (thread);
4928 });
d50171e4
PA
4929
4930 if (need_step_over != NULL)
4931 {
c058728c
SM
4932 threads_debug_printf ("found thread %ld needing a step-over",
4933 lwpid_of (need_step_over));
d50171e4 4934
d86d4aaf 4935 start_step_over (get_thread_lwp (need_step_over));
d50171e4
PA
4936 return;
4937 }
4938 }
5544ad89 4939
c058728c 4940 threads_debug_printf ("Proceeding, no step-over needed");
d50171e4 4941
df95181f 4942 for_each_thread ([this] (thread_info *thread)
e2b44075
SM
4943 {
4944 proceed_one_lwp (thread, NULL);
4945 });
d50171e4
PA
4946}
4947
d16f3f6c
TBA
4948void
4949linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
d50171e4 4950{
c058728c
SM
4951 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4952
4953 if (except)
4954 threads_debug_printf ("except=(LWP %ld)",
4955 lwpid_of (get_lwp_thread (except)));
4956 else
4957 threads_debug_printf ("except=nullptr");
5544ad89 4958
7984d532 4959 if (unsuspend)
e2b44075
SM
4960 for_each_thread ([&] (thread_info *thread)
4961 {
4962 unsuspend_and_proceed_one_lwp (thread, except);
4963 });
7984d532 4964 else
e2b44075
SM
4965 for_each_thread ([&] (thread_info *thread)
4966 {
4967 proceed_one_lwp (thread, except);
4968 });
0d62e5e8
DJ
4969}
4970
58caa3dc
DJ
4971
4972#ifdef HAVE_LINUX_REGSETS
4973
1faeff08
MR
4974#define use_linux_regsets 1
4975
030031ee
PA
4976/* Returns true if REGSET has been disabled. */
4977
4978static int
4979regset_disabled (struct regsets_info *info, struct regset_info *regset)
4980{
4981 return (info->disabled_regsets != NULL
4982 && info->disabled_regsets[regset - info->regsets]);
4983}
4984
4985/* Disable REGSET. */
4986
4987static void
4988disable_regset (struct regsets_info *info, struct regset_info *regset)
4989{
4990 int dr_offset;
4991
4992 dr_offset = regset - info->regsets;
4993 if (info->disabled_regsets == NULL)
224c3ddb 4994 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
030031ee
PA
4995 info->disabled_regsets[dr_offset] = 1;
4996}
4997
58caa3dc 4998static int
3aee8918
PA
4999regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5000 struct regcache *regcache)
58caa3dc
DJ
5001{
5002 struct regset_info *regset;
e9d25b98 5003 int saw_general_regs = 0;
95954743 5004 int pid;
1570b33e 5005 struct iovec iov;
58caa3dc 5006
0bfdf32f 5007 pid = lwpid_of (current_thread);
28eef672 5008 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 5009 {
1570b33e
L
5010 void *buf, *data;
5011 int nt_type, res;
58caa3dc 5012
030031ee 5013 if (regset->size == 0 || regset_disabled (regsets_info, regset))
28eef672 5014 continue;
58caa3dc 5015
bca929d3 5016 buf = xmalloc (regset->size);
1570b33e
L
5017
5018 nt_type = regset->nt_type;
5019 if (nt_type)
5020 {
5021 iov.iov_base = buf;
5022 iov.iov_len = regset->size;
5023 data = (void *) &iov;
5024 }
5025 else
5026 data = buf;
5027
dfb64f85 5028#ifndef __sparc__
f15f9948 5029 res = ptrace (regset->get_request, pid,
b8e1b30e 5030 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5031#else
1570b33e 5032 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 5033#endif
58caa3dc
DJ
5034 if (res < 0)
5035 {
1ef53e6b
AH
5036 if (errno == EIO
5037 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
58caa3dc 5038 {
1ef53e6b
AH
5039 /* If we get EIO on a regset, or an EINVAL and the regset is
5040 optional, do not try it again for this process mode. */
030031ee 5041 disable_regset (regsets_info, regset);
58caa3dc 5042 }
e5a9158d
AA
5043 else if (errno == ENODATA)
5044 {
5045 /* ENODATA may be returned if the regset is currently
5046 not "active". This can happen in normal operation,
5047 so suppress the warning in this case. */
5048 }
fcd4a73d
YQ
5049 else if (errno == ESRCH)
5050 {
5051 /* At this point, ESRCH should mean the process is
5052 already gone, in which case we simply ignore attempts
5053 to read its registers. */
5054 }
58caa3dc
DJ
5055 else
5056 {
0d62e5e8 5057 char s[256];
95954743
PA
5058 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5059 pid);
0d62e5e8 5060 perror (s);
58caa3dc
DJ
5061 }
5062 }
098dbe61
AA
5063 else
5064 {
5065 if (regset->type == GENERAL_REGS)
5066 saw_general_regs = 1;
5067 regset->store_function (regcache, buf);
5068 }
fdeb2a12 5069 free (buf);
58caa3dc 5070 }
e9d25b98
DJ
5071 if (saw_general_regs)
5072 return 0;
5073 else
5074 return 1;
58caa3dc
DJ
5075}
5076
5077static int
3aee8918
PA
5078regsets_store_inferior_registers (struct regsets_info *regsets_info,
5079 struct regcache *regcache)
58caa3dc
DJ
5080{
5081 struct regset_info *regset;
e9d25b98 5082 int saw_general_regs = 0;
95954743 5083 int pid;
1570b33e 5084 struct iovec iov;
58caa3dc 5085
0bfdf32f 5086 pid = lwpid_of (current_thread);
28eef672 5087 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 5088 {
1570b33e
L
5089 void *buf, *data;
5090 int nt_type, res;
58caa3dc 5091
feea5f36
AA
5092 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5093 || regset->fill_function == NULL)
28eef672 5094 continue;
58caa3dc 5095
bca929d3 5096 buf = xmalloc (regset->size);
545587ee
DJ
5097
5098 /* First fill the buffer with the current register set contents,
5099 in case there are any items in the kernel's regset that are
5100 not in gdbserver's regcache. */
1570b33e
L
5101
5102 nt_type = regset->nt_type;
5103 if (nt_type)
5104 {
5105 iov.iov_base = buf;
5106 iov.iov_len = regset->size;
5107 data = (void *) &iov;
5108 }
5109 else
5110 data = buf;
5111
dfb64f85 5112#ifndef __sparc__
f15f9948 5113 res = ptrace (regset->get_request, pid,
b8e1b30e 5114 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5115#else
689cc2ae 5116 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 5117#endif
545587ee
DJ
5118
5119 if (res == 0)
5120 {
5121 /* Then overlay our cached registers on that. */
442ea881 5122 regset->fill_function (regcache, buf);
545587ee
DJ
5123
5124 /* Only now do we write the register set. */
dfb64f85 5125#ifndef __sparc__
f15f9948 5126 res = ptrace (regset->set_request, pid,
b8e1b30e 5127 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5128#else
1570b33e 5129 res = ptrace (regset->set_request, pid, data, nt_type);
dfb64f85 5130#endif
545587ee
DJ
5131 }
5132
58caa3dc
DJ
5133 if (res < 0)
5134 {
1ef53e6b
AH
5135 if (errno == EIO
5136 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
58caa3dc 5137 {
1ef53e6b
AH
5138 /* If we get EIO on a regset, or an EINVAL and the regset is
5139 optional, do not try it again for this process mode. */
030031ee 5140 disable_regset (regsets_info, regset);
58caa3dc 5141 }
3221518c
UW
5142 else if (errno == ESRCH)
5143 {
1b3f6016
PA
5144 /* At this point, ESRCH should mean the process is
5145 already gone, in which case we simply ignore attempts
5146 to change its registers. See also the related
df95181f 5147 comment in resume_one_lwp. */
fdeb2a12 5148 free (buf);
3221518c
UW
5149 return 0;
5150 }
58caa3dc
DJ
5151 else
5152 {
ce3a066d 5153 perror ("Warning: ptrace(regsets_store_inferior_registers)");
58caa3dc
DJ
5154 }
5155 }
e9d25b98
DJ
5156 else if (regset->type == GENERAL_REGS)
5157 saw_general_regs = 1;
09ec9b38 5158 free (buf);
58caa3dc 5159 }
e9d25b98
DJ
5160 if (saw_general_regs)
5161 return 0;
5162 else
5163 return 1;
58caa3dc
DJ
5164}
5165
1faeff08 5166#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5167
1faeff08 5168#define use_linux_regsets 0
3aee8918
PA
5169#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5170#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5171
58caa3dc 5172#endif
1faeff08
MR
5173
5174/* Return 1 if register REGNO is supported by one of the regset ptrace
5175 calls or 0 if it has to be transferred individually. */
5176
5177static int
3aee8918 5178linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5179{
5180 unsigned char mask = 1 << (regno % 8);
5181 size_t index = regno / 8;
5182
5183 return (use_linux_regsets
3aee8918
PA
5184 && (regs_info->regset_bitmap == NULL
5185 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5186}
5187
58caa3dc 5188#ifdef HAVE_LINUX_USRREGS
1faeff08 5189
5b3da067 5190static int
3aee8918 5191register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5192{
5193 int addr;
5194
3aee8918 5195 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5196 error ("Invalid register number %d.", regnum);
5197
3aee8918 5198 addr = usrregs->regmap[regnum];
1faeff08
MR
5199
5200 return addr;
5201}
5202
daca57a7
TBA
5203
5204void
5205linux_process_target::fetch_register (const usrregs_info *usrregs,
5206 regcache *regcache, int regno)
1faeff08
MR
5207{
5208 CORE_ADDR regaddr;
5209 int i, size;
5210 char *buf;
5211 int pid;
5212
3aee8918 5213 if (regno >= usrregs->num_regs)
1faeff08 5214 return;
daca57a7 5215 if (low_cannot_fetch_register (regno))
1faeff08
MR
5216 return;
5217
3aee8918 5218 regaddr = register_addr (usrregs, regno);
1faeff08
MR
5219 if (regaddr == -1)
5220 return;
5221
3aee8918
PA
5222 size = ((register_size (regcache->tdesc, regno)
5223 + sizeof (PTRACE_XFER_TYPE) - 1)
1faeff08 5224 & -sizeof (PTRACE_XFER_TYPE));
224c3ddb 5225 buf = (char *) alloca (size);
1faeff08 5226
0bfdf32f 5227 pid = lwpid_of (current_thread);
1faeff08
MR
5228 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5229 {
5230 errno = 0;
5231 *(PTRACE_XFER_TYPE *) (buf + i) =
5232 ptrace (PTRACE_PEEKUSER, pid,
5233 /* Coerce to a uintptr_t first to avoid potential gcc warning
5234 of coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 5235 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
1faeff08
MR
5236 regaddr += sizeof (PTRACE_XFER_TYPE);
5237 if (errno != 0)
9a70f35c
YQ
5238 {
5239 /* Mark register REGNO unavailable. */
5240 supply_register (regcache, regno, NULL);
5241 return;
5242 }
1faeff08
MR
5243 }
5244
b35db733 5245 low_supply_ptrace_register (regcache, regno, buf);
1faeff08
MR
5246}
5247
daca57a7
TBA
5248void
5249linux_process_target::store_register (const usrregs_info *usrregs,
5250 regcache *regcache, int regno)
1faeff08
MR
5251{
5252 CORE_ADDR regaddr;
5253 int i, size;
5254 char *buf;
5255 int pid;
5256
3aee8918 5257 if (regno >= usrregs->num_regs)
1faeff08 5258 return;
daca57a7 5259 if (low_cannot_store_register (regno))
1faeff08
MR
5260 return;
5261
3aee8918 5262 regaddr = register_addr (usrregs, regno);
1faeff08
MR
5263 if (regaddr == -1)
5264 return;
5265
3aee8918
PA
5266 size = ((register_size (regcache->tdesc, regno)
5267 + sizeof (PTRACE_XFER_TYPE) - 1)
1faeff08 5268 & -sizeof (PTRACE_XFER_TYPE));
224c3ddb 5269 buf = (char *) alloca (size);
1faeff08
MR
5270 memset (buf, 0, size);
5271
b35db733 5272 low_collect_ptrace_register (regcache, regno, buf);
1faeff08 5273
0bfdf32f 5274 pid = lwpid_of (current_thread);
1faeff08
MR
5275 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5276 {
5277 errno = 0;
5278 ptrace (PTRACE_POKEUSER, pid,
5279 /* Coerce to a uintptr_t first to avoid potential gcc warning
5280 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e
LM
5281 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5282 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
1faeff08
MR
5283 if (errno != 0)
5284 {
5285 /* At this point, ESRCH should mean the process is
5286 already gone, in which case we simply ignore attempts
5287 to change its registers. See also the related
df95181f 5288 comment in resume_one_lwp. */
1faeff08
MR
5289 if (errno == ESRCH)
5290 return;
5291
daca57a7
TBA
5292
5293 if (!low_cannot_store_register (regno))
6d91ce9a 5294 error ("writing register %d: %s", regno, safe_strerror (errno));
1faeff08
MR
5295 }
5296 regaddr += sizeof (PTRACE_XFER_TYPE);
5297 }
5298}
daca57a7 5299#endif /* HAVE_LINUX_USRREGS */
1faeff08 5300
b35db733
TBA
5301void
5302linux_process_target::low_collect_ptrace_register (regcache *regcache,
5303 int regno, char *buf)
5304{
5305 collect_register (regcache, regno, buf);
5306}
5307
5308void
5309linux_process_target::low_supply_ptrace_register (regcache *regcache,
5310 int regno, const char *buf)
5311{
5312 supply_register (regcache, regno, buf);
5313}
5314
daca57a7
TBA
5315void
5316linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5317 regcache *regcache,
5318 int regno, int all)
1faeff08 5319{
daca57a7 5320#ifdef HAVE_LINUX_USRREGS
3aee8918
PA
5321 struct usrregs_info *usr = regs_info->usrregs;
5322
1faeff08
MR
5323 if (regno == -1)
5324 {
3aee8918
PA
5325 for (regno = 0; regno < usr->num_regs; regno++)
5326 if (all || !linux_register_in_regsets (regs_info, regno))
5327 fetch_register (usr, regcache, regno);
1faeff08
MR
5328 }
5329 else
3aee8918 5330 fetch_register (usr, regcache, regno);
daca57a7 5331#endif
1faeff08
MR
5332}
5333
daca57a7
TBA
5334void
5335linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5336 regcache *regcache,
5337 int regno, int all)
1faeff08 5338{
daca57a7 5339#ifdef HAVE_LINUX_USRREGS
3aee8918
PA
5340 struct usrregs_info *usr = regs_info->usrregs;
5341
1faeff08
MR
5342 if (regno == -1)
5343 {
3aee8918
PA
5344 for (regno = 0; regno < usr->num_regs; regno++)
5345 if (all || !linux_register_in_regsets (regs_info, regno))
5346 store_register (usr, regcache, regno);
1faeff08
MR
5347 }
5348 else
3aee8918 5349 store_register (usr, regcache, regno);
58caa3dc 5350#endif
daca57a7 5351}
1faeff08 5352
a5a4d4cd
TBA
5353void
5354linux_process_target::fetch_registers (regcache *regcache, int regno)
1faeff08
MR
5355{
5356 int use_regsets;
5357 int all = 0;
aa8d21c9 5358 const regs_info *regs_info = get_regs_info ();
1faeff08
MR
5359
5360 if (regno == -1)
5361 {
bd70b1f2 5362 if (regs_info->usrregs != NULL)
3aee8918 5363 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
bd70b1f2 5364 low_fetch_register (regcache, regno);
c14dfd32 5365
3aee8918
PA
5366 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5367 if (regs_info->usrregs != NULL)
5368 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
1faeff08
MR
5369 }
5370 else
5371 {
bd70b1f2 5372 if (low_fetch_register (regcache, regno))
c14dfd32
PA
5373 return;
5374
3aee8918 5375 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5376 if (use_regsets)
3aee8918
PA
5377 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5378 regcache);
5379 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5380 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5381 }
58caa3dc
DJ
5382}
5383
a5a4d4cd
TBA
5384void
5385linux_process_target::store_registers (regcache *regcache, int regno)
58caa3dc 5386{
1faeff08
MR
5387 int use_regsets;
5388 int all = 0;
aa8d21c9 5389 const regs_info *regs_info = get_regs_info ();
1faeff08
MR
5390
5391 if (regno == -1)
5392 {
3aee8918
PA
5393 all = regsets_store_inferior_registers (regs_info->regsets_info,
5394 regcache);
5395 if (regs_info->usrregs != NULL)
5396 usr_store_inferior_registers (regs_info, regcache, regno, all);
1faeff08
MR
5397 }
5398 else
5399 {
3aee8918 5400 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5401 if (use_regsets)
3aee8918
PA
5402 all = regsets_store_inferior_registers (regs_info->regsets_info,
5403 regcache);
5404 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5405 usr_store_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5406 }
58caa3dc
DJ
5407}
5408
bd70b1f2
TBA
5409bool
5410linux_process_target::low_fetch_register (regcache *regcache, int regno)
5411{
5412 return false;
5413}
da6d8c04 5414
e2558df3 5415/* A wrapper for the read_memory target op. */
da6d8c04 5416
c3e735a6 5417static int
f450004a 5418linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
e2558df3 5419{
52405d85 5420 return the_target->read_memory (memaddr, myaddr, len);
e2558df3
TBA
5421}
5422
e2558df3 5423
421490af
PA
5424/* Helper for read_memory/write_memory using /proc/PID/mem. Because
5425 we can use a single read/write call, this can be much more
5426 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
5427 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running threads.
5428 One an only one of READBUF and WRITEBUF is non-null. If READBUF is
5429 not null, then we're reading, otherwise we're writing. */
5430
5431static int
5432proc_xfer_memory (CORE_ADDR memaddr, unsigned char *readbuf,
5433 const gdb_byte *writebuf, int len)
da6d8c04 5434{
421490af 5435 gdb_assert ((readbuf == nullptr) != (writebuf == nullptr));
fd462a61 5436
421490af
PA
5437 process_info *proc = current_process ();
5438
5439 int fd = proc->priv->mem_fd;
5440 if (fd == -1)
5441 return EIO;
5442
5443 while (len > 0)
fd462a61 5444 {
4934b29e
MR
5445 int bytes;
5446
31a56a22
PA
5447 /* Use pread64/pwrite64 if available, since they save a syscall
5448 and can handle 64-bit offsets even on 32-bit platforms (for
5449 instance, SPARC debugging a SPARC64 application). But only
5450 use them if the offset isn't so high that when cast to off_t
5451 it'd be negative, as seen on SPARC64. pread64/pwrite64
5452 outright reject such offsets. lseek does not. */
fd462a61 5453#ifdef HAVE_PREAD64
31a56a22 5454 if ((off_t) memaddr >= 0)
421490af 5455 bytes = (readbuf != nullptr
31a56a22
PA
5456 ? pread64 (fd, readbuf, len, memaddr)
5457 : pwrite64 (fd, writebuf, len, memaddr));
5458 else
fd462a61 5459#endif
31a56a22
PA
5460 {
5461 bytes = -1;
5462 if (lseek (fd, memaddr, SEEK_SET) != -1)
5463 bytes = (readbuf != nullptr
5464 ? read (fd, readbuf, len)
5465 : write (fd, writebuf, len));
5466 }
fd462a61 5467
421490af
PA
5468 if (bytes < 0)
5469 return errno;
5470 else if (bytes == 0)
4934b29e 5471 {
421490af
PA
5472 /* EOF means the address space is gone, the whole process
5473 exited or execed. */
5474 return EIO;
4934b29e 5475 }
da6d8c04 5476
421490af
PA
5477 memaddr += bytes;
5478 if (readbuf != nullptr)
5479 readbuf += bytes;
5480 else
5481 writebuf += bytes;
5482 len -= bytes;
da6d8c04
DJ
5483 }
5484
421490af
PA
5485 return 0;
5486}
c3e735a6 5487
421490af
PA
5488int
5489linux_process_target::read_memory (CORE_ADDR memaddr,
5490 unsigned char *myaddr, int len)
5491{
5492 return proc_xfer_memory (memaddr, myaddr, nullptr, len);
da6d8c04
DJ
5493}
5494
93ae6fdc
PA
5495/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5496 memory at MEMADDR. On failure (cannot write to the inferior)
f0ae6fc3 5497 returns the value of errno. Always succeeds if LEN is zero. */
da6d8c04 5498
e2558df3
TBA
5499int
5500linux_process_target::write_memory (CORE_ADDR memaddr,
5501 const unsigned char *myaddr, int len)
da6d8c04 5502{
0d62e5e8
DJ
5503 if (debug_threads)
5504 {
58d6951d 5505 /* Dump up to four bytes. */
bf47e248
PA
5506 char str[4 * 2 + 1];
5507 char *p = str;
5508 int dump = len < 4 ? len : 4;
5509
421490af 5510 for (int i = 0; i < dump; i++)
bf47e248
PA
5511 {
5512 sprintf (p, "%02x", myaddr[i]);
5513 p += 2;
5514 }
5515 *p = '\0';
5516
c058728c 5517 threads_debug_printf ("Writing %s to 0x%08lx in process %d",
421490af 5518 str, (long) memaddr, current_process ()->pid);
0d62e5e8
DJ
5519 }
5520
421490af 5521 return proc_xfer_memory (memaddr, nullptr, myaddr, len);
da6d8c04 5522}
2f2893d9 5523
2a31c7aa
TBA
5524void
5525linux_process_target::look_up_symbols ()
2f2893d9 5526{
0d62e5e8 5527#ifdef USE_THREAD_DB
95954743
PA
5528 struct process_info *proc = current_process ();
5529
fe978cb0 5530 if (proc->priv->thread_db != NULL)
0d62e5e8
DJ
5531 return;
5532
9b4c5f87 5533 thread_db_init ();
0d62e5e8
DJ
5534#endif
5535}
5536
eb497a2a
TBA
5537void
5538linux_process_target::request_interrupt ()
e5379b03 5539{
78708b7c
PA
5540 /* Send a SIGINT to the process group. This acts just like the user
5541 typed a ^C on the controlling terminal. */
4c35c4c6
TV
5542 int res = ::kill (-signal_pid, SIGINT);
5543 if (res == -1)
5544 warning (_("Sending SIGINT to process group of pid %ld failed: %s"),
5545 signal_pid, safe_strerror (errno));
e5379b03
DJ
5546}
5547
eac215cc
TBA
5548bool
5549linux_process_target::supports_read_auxv ()
5550{
5551 return true;
5552}
5553
aa691b87
RM
5554/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5555 to debugger memory starting at MYADDR. */
5556
eac215cc 5557int
43e5fbd8
TJB
5558linux_process_target::read_auxv (int pid, CORE_ADDR offset,
5559 unsigned char *myaddr, unsigned int len)
aa691b87
RM
5560{
5561 char filename[PATH_MAX];
5562 int fd, n;
5563
6cebaf6e 5564 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5565
5566 fd = open (filename, O_RDONLY);
5567 if (fd < 0)
5568 return -1;
5569
5570 if (offset != (CORE_ADDR) 0
5571 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5572 n = -1;
5573 else
5574 n = read (fd, myaddr, len);
5575
5576 close (fd);
5577
5578 return n;
5579}
5580
7e0bde70
TBA
5581int
5582linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5583 int size, raw_breakpoint *bp)
e013ee27 5584{
c8f4bfdd
YQ
5585 if (type == raw_bkpt_type_sw)
5586 return insert_memory_breakpoint (bp);
e013ee27 5587 else
9db9aa23
TBA
5588 return low_insert_point (type, addr, size, bp);
5589}
5590
5591int
5592linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5593 int size, raw_breakpoint *bp)
5594{
5595 /* Unsupported (see target.h). */
5596 return 1;
e013ee27
OF
5597}
5598
7e0bde70
TBA
5599int
5600linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5601 int size, raw_breakpoint *bp)
e013ee27 5602{
c8f4bfdd
YQ
5603 if (type == raw_bkpt_type_sw)
5604 return remove_memory_breakpoint (bp);
e013ee27 5605 else
9db9aa23
TBA
5606 return low_remove_point (type, addr, size, bp);
5607}
5608
5609int
5610linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5611 int size, raw_breakpoint *bp)
5612{
5613 /* Unsupported (see target.h). */
5614 return 1;
e013ee27
OF
5615}
5616
84320c4e 5617/* Implement the stopped_by_sw_breakpoint target_ops
3e572f71
PA
5618 method. */
5619
84320c4e
TBA
5620bool
5621linux_process_target::stopped_by_sw_breakpoint ()
3e572f71
PA
5622{
5623 struct lwp_info *lwp = get_thread_lwp (current_thread);
5624
5625 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5626}
5627
84320c4e 5628/* Implement the supports_stopped_by_sw_breakpoint target_ops
3e572f71
PA
5629 method. */
5630
84320c4e
TBA
5631bool
5632linux_process_target::supports_stopped_by_sw_breakpoint ()
3e572f71
PA
5633{
5634 return USE_SIGTRAP_SIGINFO;
5635}
5636
93fe88b2 5637/* Implement the stopped_by_hw_breakpoint target_ops
3e572f71
PA
5638 method. */
5639
93fe88b2
TBA
5640bool
5641linux_process_target::stopped_by_hw_breakpoint ()
3e572f71
PA
5642{
5643 struct lwp_info *lwp = get_thread_lwp (current_thread);
5644
5645 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5646}
5647
93fe88b2 5648/* Implement the supports_stopped_by_hw_breakpoint target_ops
3e572f71
PA
5649 method. */
5650
93fe88b2
TBA
5651bool
5652linux_process_target::supports_stopped_by_hw_breakpoint ()
3e572f71
PA
5653{
5654 return USE_SIGTRAP_SIGINFO;
5655}
5656
70b90b91 5657/* Implement the supports_hardware_single_step target_ops method. */
45614f15 5658
22aa6223
TBA
5659bool
5660linux_process_target::supports_hardware_single_step ()
45614f15 5661{
b31cdfa6 5662 return true;
45614f15
YQ
5663}
5664
6eeb5c55
TBA
5665bool
5666linux_process_target::stopped_by_watchpoint ()
e013ee27 5667{
0bfdf32f 5668 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c 5669
15c66dd6 5670 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
e013ee27
OF
5671}
5672
6eeb5c55
TBA
5673CORE_ADDR
5674linux_process_target::stopped_data_address ()
e013ee27 5675{
0bfdf32f 5676 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
5677
5678 return lwp->stopped_data_address;
e013ee27
OF
5679}
5680
db0dfaa0
LM
5681/* This is only used for targets that define PT_TEXT_ADDR,
5682 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5683 the target has different ways of acquiring this information, like
5684 loadmaps. */
52fb6437 5685
5203ae1e
TBA
5686bool
5687linux_process_target::supports_read_offsets ()
5688{
5689#ifdef SUPPORTS_READ_OFFSETS
5690 return true;
5691#else
5692 return false;
5693#endif
5694}
5695
52fb6437
NS
5696/* Under uClinux, programs are loaded at non-zero offsets, which we need
5697 to tell gdb about. */
5698
5203ae1e
TBA
5699int
5700linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
52fb6437 5701{
5203ae1e 5702#ifdef SUPPORTS_READ_OFFSETS
52fb6437 5703 unsigned long text, text_end, data;
62828379 5704 int pid = lwpid_of (current_thread);
52fb6437
NS
5705
5706 errno = 0;
5707
b8e1b30e
LM
5708 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5709 (PTRACE_TYPE_ARG4) 0);
5710 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5711 (PTRACE_TYPE_ARG4) 0);
5712 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5713 (PTRACE_TYPE_ARG4) 0);
52fb6437
NS
5714
5715 if (errno == 0)
5716 {
5717 /* Both text and data offsets produced at compile-time (and so
1b3f6016
PA
5718 used by gdb) are relative to the beginning of the program,
5719 with the data segment immediately following the text segment.
5720 However, the actual runtime layout in memory may put the data
5721 somewhere else, so when we send gdb a data base-address, we
5722 use the real data base address and subtract the compile-time
5723 data base-address from it (which is just the length of the
5724 text segment). BSS immediately follows data in both
5725 cases. */
52fb6437
NS
5726 *text_p = text;
5727 *data_p = data - (text_end - text);
1b3f6016 5728
52fb6437
NS
5729 return 1;
5730 }
5203ae1e
TBA
5731 return 0;
5732#else
5733 gdb_assert_not_reached ("target op read_offsets not supported");
52fb6437 5734#endif
5203ae1e 5735}
52fb6437 5736
6e3fd7e9
TBA
5737bool
5738linux_process_target::supports_get_tls_address ()
5739{
5740#ifdef USE_THREAD_DB
5741 return true;
5742#else
5743 return false;
5744#endif
5745}
5746
5747int
5748linux_process_target::get_tls_address (thread_info *thread,
5749 CORE_ADDR offset,
5750 CORE_ADDR load_module,
5751 CORE_ADDR *address)
5752{
5753#ifdef USE_THREAD_DB
5754 return thread_db_get_tls_address (thread, offset, load_module, address);
5755#else
5756 return -1;
5757#endif
5758}
5759
2d0795ee
TBA
5760bool
5761linux_process_target::supports_qxfer_osdata ()
5762{
5763 return true;
5764}
5765
5766int
5767linux_process_target::qxfer_osdata (const char *annex,
5768 unsigned char *readbuf,
5769 unsigned const char *writebuf,
5770 CORE_ADDR offset, int len)
07e059b5 5771{
d26e3629 5772 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
5773}
5774
cb63de7c
TBA
5775void
5776linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5777 gdb_byte *inf_siginfo, int direction)
d0722149 5778{
cb63de7c 5779 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
d0722149
DE
5780
5781 /* If there was no callback, or the callback didn't do anything,
5782 then just do a straight memcpy. */
5783 if (!done)
5784 {
5785 if (direction == 1)
a5362b9a 5786 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 5787 else
a5362b9a 5788 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
5789 }
5790}
5791
cb63de7c
TBA
5792bool
5793linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
5794 int direction)
5795{
5796 return false;
5797}
5798
d7abedf7
TBA
5799bool
5800linux_process_target::supports_qxfer_siginfo ()
5801{
5802 return true;
5803}
5804
5805int
5806linux_process_target::qxfer_siginfo (const char *annex,
5807 unsigned char *readbuf,
5808 unsigned const char *writebuf,
5809 CORE_ADDR offset, int len)
4aa995e1 5810{
d0722149 5811 int pid;
a5362b9a 5812 siginfo_t siginfo;
8adce034 5813 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1 5814
0bfdf32f 5815 if (current_thread == NULL)
4aa995e1
PA
5816 return -1;
5817
0bfdf32f 5818 pid = lwpid_of (current_thread);
4aa995e1 5819
c058728c
SM
5820 threads_debug_printf ("%s siginfo for lwp %d.",
5821 readbuf != NULL ? "Reading" : "Writing",
5822 pid);
4aa995e1 5823
0adea5f7 5824 if (offset >= sizeof (siginfo))
4aa995e1
PA
5825 return -1;
5826
b8e1b30e 5827 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
5828 return -1;
5829
d0722149
DE
5830 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5831 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5832 inferior with a 64-bit GDBSERVER should look the same as debugging it
5833 with a 32-bit GDBSERVER, we need to convert it. */
5834 siginfo_fixup (&siginfo, inf_siginfo, 0);
5835
4aa995e1
PA
5836 if (offset + len > sizeof (siginfo))
5837 len = sizeof (siginfo) - offset;
5838
5839 if (readbuf != NULL)
d0722149 5840 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
5841 else
5842 {
d0722149
DE
5843 memcpy (inf_siginfo + offset, writebuf, len);
5844
5845 /* Convert back to ptrace layout before flushing it out. */
5846 siginfo_fixup (&siginfo, inf_siginfo, 1);
5847
b8e1b30e 5848 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
5849 return -1;
5850 }
5851
5852 return len;
5853}
5854
bd99dc85
PA
5855/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5856 so we notice when children change state; as the handler for the
5857 sigsuspend in my_waitpid. */
5858
5859static void
5860sigchld_handler (int signo)
5861{
5862 int old_errno = errno;
5863
5864 if (debug_threads)
e581f2b4
PA
5865 {
5866 do
5867 {
a7e559cc
AH
5868 /* Use the async signal safe debug function. */
5869 if (debug_write ("sigchld_handler\n",
5870 sizeof ("sigchld_handler\n") - 1) < 0)
e581f2b4
PA
5871 break; /* just ignore */
5872 } while (0);
5873 }
bd99dc85
PA
5874
5875 if (target_is_async_p ())
5876 async_file_mark (); /* trigger a linux_wait */
5877
5878 errno = old_errno;
5879}
5880
0dc587d4
TBA
5881bool
5882linux_process_target::supports_non_stop ()
bd99dc85 5883{
0dc587d4 5884 return true;
bd99dc85
PA
5885}
5886
0dc587d4
TBA
5887bool
5888linux_process_target::async (bool enable)
bd99dc85 5889{
0dc587d4 5890 bool previous = target_is_async_p ();
bd99dc85 5891
c058728c
SM
5892 threads_debug_printf ("async (%d), previous=%d",
5893 enable, previous);
8336d594 5894
bd99dc85
PA
5895 if (previous != enable)
5896 {
5897 sigset_t mask;
5898 sigemptyset (&mask);
5899 sigaddset (&mask, SIGCHLD);
5900
21987b9c 5901 gdb_sigmask (SIG_BLOCK, &mask, NULL);
bd99dc85
PA
5902
5903 if (enable)
5904 {
8674f082 5905 if (!linux_event_pipe.open_pipe ())
aa96c426 5906 {
21987b9c 5907 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
aa96c426
GB
5908
5909 warning ("creating event pipe failed.");
5910 return previous;
5911 }
bd99dc85 5912
bd99dc85 5913 /* Register the event loop handler. */
cdc8e9b2 5914 add_file_handler (linux_event_pipe.event_fd (),
2554f6f5
SM
5915 handle_target_event, NULL,
5916 "linux-low");
bd99dc85
PA
5917
5918 /* Always trigger a linux_wait. */
5919 async_file_mark ();
5920 }
5921 else
5922 {
cdc8e9b2 5923 delete_file_handler (linux_event_pipe.event_fd ());
bd99dc85 5924
8674f082 5925 linux_event_pipe.close_pipe ();
bd99dc85
PA
5926 }
5927
21987b9c 5928 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
bd99dc85
PA
5929 }
5930
5931 return previous;
5932}
5933
0dc587d4
TBA
5934int
5935linux_process_target::start_non_stop (bool nonstop)
bd99dc85
PA
5936{
5937 /* Register or unregister from event-loop accordingly. */
0dc587d4 5938 target_async (nonstop);
aa96c426 5939
0dc587d4 5940 if (target_is_async_p () != (nonstop != false))
aa96c426
GB
5941 return -1;
5942
bd99dc85
PA
5943 return 0;
5944}
5945
652aef77
TBA
5946bool
5947linux_process_target::supports_multi_process ()
cf8fd78b 5948{
652aef77 5949 return true;
cf8fd78b
PA
5950}
5951
89245bc0
DB
5952/* Check if fork events are supported. */
5953
9690a72a
TBA
5954bool
5955linux_process_target::supports_fork_events ()
89245bc0 5956{
a2885186 5957 return true;
89245bc0
DB
5958}
5959
5960/* Check if vfork events are supported. */
5961
9690a72a
TBA
5962bool
5963linux_process_target::supports_vfork_events ()
89245bc0 5964{
a2885186 5965 return true;
89245bc0
DB
5966}
5967
393a6b59
PA
5968/* Return the set of supported thread options. */
5969
5970gdb_thread_options
5971linux_process_target::supported_thread_options ()
5972{
48989498 5973 return GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
393a6b59
PA
5974}
5975
94585166
DB
5976/* Check if exec events are supported. */
5977
9690a72a
TBA
5978bool
5979linux_process_target::supports_exec_events ()
94585166 5980{
a2885186 5981 return true;
94585166
DB
5982}
5983
de0d863e
DB
5984/* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5985 ptrace flags for all inferiors. This is in case the new GDB connection
5986 doesn't support the same set of events that the previous one did. */
5987
fb00dfce
TBA
5988void
5989linux_process_target::handle_new_gdb_connection ()
de0d863e 5990{
de0d863e 5991 /* Request that all the lwps reset their ptrace options. */
bbf550d5
SM
5992 for_each_thread ([] (thread_info *thread)
5993 {
5994 struct lwp_info *lwp = get_thread_lwp (thread);
5995
5996 if (!lwp->stopped)
5997 {
5998 /* Stop the lwp so we can modify its ptrace options. */
5999 lwp->must_set_ptrace_flags = 1;
6000 linux_stop_lwp (lwp);
6001 }
6002 else
6003 {
6004 /* Already stopped; go ahead and set the ptrace options. */
6005 struct process_info *proc = find_process_pid (pid_of (thread));
6006 int options = linux_low_ptrace_options (proc->attached);
6007
6008 linux_enable_event_reporting (lwpid_of (thread), options);
6009 lwp->must_set_ptrace_flags = 0;
6010 }
6011 });
de0d863e
DB
6012}
6013
55cf3021
TBA
6014int
6015linux_process_target::handle_monitor_command (char *mon)
6016{
6017#ifdef USE_THREAD_DB
6018 return thread_db_handle_monitor_command (mon);
6019#else
6020 return 0;
6021#endif
6022}
6023
95a45fc1
TBA
6024int
6025linux_process_target::core_of_thread (ptid_t ptid)
6026{
6027 return linux_common_core_of_thread (ptid);
6028}
6029
c756403b
TBA
6030bool
6031linux_process_target::supports_disable_randomization ()
03583c20 6032{
c756403b 6033 return true;
03583c20 6034}
efcbbd14 6035
c0245cb9
TBA
6036bool
6037linux_process_target::supports_agent ()
d1feda86 6038{
c0245cb9 6039 return true;
d1feda86
YQ
6040}
6041
2526e0cd
TBA
6042bool
6043linux_process_target::supports_range_stepping ()
c2d6af84 6044{
7582c77c 6045 if (supports_software_single_step ())
2526e0cd 6046 return true;
c2d6af84 6047
9cfd8715
TBA
6048 return low_supports_range_stepping ();
6049}
6050
6051bool
6052linux_process_target::low_supports_range_stepping ()
6053{
6054 return false;
c2d6af84
PA
6055}
6056
8247b823
TBA
6057bool
6058linux_process_target::supports_pid_to_exec_file ()
6059{
6060 return true;
6061}
6062
04977957 6063const char *
8247b823
TBA
6064linux_process_target::pid_to_exec_file (int pid)
6065{
6066 return linux_proc_pid_to_exec_file (pid);
6067}
6068
c9b7b804
TBA
6069bool
6070linux_process_target::supports_multifs ()
6071{
6072 return true;
6073}
6074
6075int
6076linux_process_target::multifs_open (int pid, const char *filename,
6077 int flags, mode_t mode)
6078{
6079 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6080}
6081
6082int
6083linux_process_target::multifs_unlink (int pid, const char *filename)
6084{
6085 return linux_mntns_unlink (pid, filename);
6086}
6087
6088ssize_t
6089linux_process_target::multifs_readlink (int pid, const char *filename,
6090 char *buf, size_t bufsiz)
6091{
6092 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6093}
6094
723b724b 6095#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
78d85199
YQ
6096struct target_loadseg
6097{
6098 /* Core address to which the segment is mapped. */
6099 Elf32_Addr addr;
6100 /* VMA recorded in the program header. */
6101 Elf32_Addr p_vaddr;
6102 /* Size of this segment in memory. */
6103 Elf32_Word p_memsz;
6104};
6105
723b724b 6106# if defined PT_GETDSBT
78d85199
YQ
6107struct target_loadmap
6108{
6109 /* Protocol version number, must be zero. */
6110 Elf32_Word version;
6111 /* Pointer to the DSBT table, its size, and the DSBT index. */
6112 unsigned *dsbt_table;
6113 unsigned dsbt_size, dsbt_index;
6114 /* Number of segments in this map. */
6115 Elf32_Word nsegs;
6116 /* The actual memory map. */
6117 struct target_loadseg segs[/*nsegs*/];
6118};
723b724b
MF
6119# define LINUX_LOADMAP PT_GETDSBT
6120# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6121# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6122# else
6123struct target_loadmap
6124{
6125 /* Protocol version number, must be zero. */
6126 Elf32_Half version;
6127 /* Number of segments in this map. */
6128 Elf32_Half nsegs;
6129 /* The actual memory map. */
6130 struct target_loadseg segs[/*nsegs*/];
6131};
6132# define LINUX_LOADMAP PTRACE_GETFDPIC
6133# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6134# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6135# endif
78d85199 6136
9da41fda
TBA
6137bool
6138linux_process_target::supports_read_loadmap ()
6139{
6140 return true;
6141}
6142
6143int
6144linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6145 unsigned char *myaddr, unsigned int len)
78d85199 6146{
0bfdf32f 6147 int pid = lwpid_of (current_thread);
78d85199
YQ
6148 int addr = -1;
6149 struct target_loadmap *data = NULL;
6150 unsigned int actual_length, copy_length;
6151
6152 if (strcmp (annex, "exec") == 0)
723b724b 6153 addr = (int) LINUX_LOADMAP_EXEC;
78d85199 6154 else if (strcmp (annex, "interp") == 0)
723b724b 6155 addr = (int) LINUX_LOADMAP_INTERP;
78d85199
YQ
6156 else
6157 return -1;
6158
723b724b 6159 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
78d85199
YQ
6160 return -1;
6161
6162 if (data == NULL)
6163 return -1;
6164
6165 actual_length = sizeof (struct target_loadmap)
6166 + sizeof (struct target_loadseg) * data->nsegs;
6167
6168 if (offset < 0 || offset > actual_length)
6169 return -1;
6170
6171 copy_length = actual_length - offset < len ? actual_length - offset : len;
6172 memcpy (myaddr, (char *) data + offset, copy_length);
6173 return copy_length;
6174}
723b724b 6175#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6176
bc8d3ae4
TBA
6177bool
6178linux_process_target::supports_catch_syscall ()
82075af2 6179{
a2885186 6180 return low_supports_catch_syscall ();
82075af2
JS
6181}
6182
9eedd27d
TBA
6183bool
6184linux_process_target::low_supports_catch_syscall ()
6185{
6186 return false;
6187}
6188
770d8f6a
TBA
6189CORE_ADDR
6190linux_process_target::read_pc (regcache *regcache)
219f2f23 6191{
bf9ae9d8 6192 if (!low_supports_breakpoints ())
219f2f23
PA
6193 return 0;
6194
bf9ae9d8 6195 return low_get_pc (regcache);
219f2f23
PA
6196}
6197
770d8f6a
TBA
6198void
6199linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
219f2f23 6200{
bf9ae9d8 6201 gdb_assert (low_supports_breakpoints ());
219f2f23 6202
bf9ae9d8 6203 low_set_pc (regcache, pc);
219f2f23
PA
6204}
6205
68119632
TBA
6206bool
6207linux_process_target::supports_thread_stopped ()
6208{
6209 return true;
6210}
6211
6212bool
6213linux_process_target::thread_stopped (thread_info *thread)
8336d594
PA
6214{
6215 return get_thread_lwp (thread)->stopped;
6216}
6217
ef980d65
PA
6218bool
6219linux_process_target::any_resumed ()
6220{
6221 bool any_resumed;
6222
6223 auto status_pending_p_any = [&] (thread_info *thread)
6224 {
6225 return status_pending_p_callback (thread, minus_one_ptid);
6226 };
6227
6228 auto not_stopped = [&] (thread_info *thread)
6229 {
6230 return not_stopped_callback (thread, minus_one_ptid);
6231 };
6232
6233 /* Find a resumed LWP, if any. */
6234 if (find_thread (status_pending_p_any) != NULL)
6235 any_resumed = 1;
6236 else if (find_thread (not_stopped) != NULL)
6237 any_resumed = 1;
6238 else
6239 any_resumed = 0;
6240
6241 return any_resumed;
6242}
6243
8336d594
PA
6244/* This exposes stop-all-threads functionality to other modules. */
6245
29e8dc09
TBA
6246void
6247linux_process_target::pause_all (bool freeze)
8336d594 6248{
7984d532
PA
6249 stop_all_lwps (freeze, NULL);
6250}
6251
6252/* This exposes unstop-all-threads functionality to other gdbserver
6253 modules. */
6254
29e8dc09
TBA
6255void
6256linux_process_target::unpause_all (bool unfreeze)
7984d532
PA
6257{
6258 unstop_all_lwps (unfreeze, NULL);
8336d594
PA
6259}
6260
2268b414
JK
6261/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6262
6263static int
6264get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6265 CORE_ADDR *phdr_memaddr, int *num_phdr)
6266{
6267 char filename[PATH_MAX];
6268 int fd;
6269 const int auxv_size = is_elf64
6270 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6271 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6272
6273 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6274
6275 fd = open (filename, O_RDONLY);
6276 if (fd < 0)
6277 return 1;
6278
6279 *phdr_memaddr = 0;
6280 *num_phdr = 0;
6281 while (read (fd, buf, auxv_size) == auxv_size
6282 && (*phdr_memaddr == 0 || *num_phdr == 0))
6283 {
6284 if (is_elf64)
6285 {
6286 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6287
6288 switch (aux->a_type)
6289 {
6290 case AT_PHDR:
6291 *phdr_memaddr = aux->a_un.a_val;
6292 break;
6293 case AT_PHNUM:
6294 *num_phdr = aux->a_un.a_val;
6295 break;
6296 }
6297 }
6298 else
6299 {
6300 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6301
6302 switch (aux->a_type)
6303 {
6304 case AT_PHDR:
6305 *phdr_memaddr = aux->a_un.a_val;
6306 break;
6307 case AT_PHNUM:
6308 *num_phdr = aux->a_un.a_val;
6309 break;
6310 }
6311 }
6312 }
6313
6314 close (fd);
6315
6316 if (*phdr_memaddr == 0 || *num_phdr == 0)
6317 {
6318 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6319 "phdr_memaddr = %ld, phdr_num = %d",
6320 (long) *phdr_memaddr, *num_phdr);
6321 return 2;
6322 }
6323
6324 return 0;
6325}
6326
6327/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6328
6329static CORE_ADDR
6330get_dynamic (const int pid, const int is_elf64)
6331{
6332 CORE_ADDR phdr_memaddr, relocation;
db1ff28b 6333 int num_phdr, i;
2268b414 6334 unsigned char *phdr_buf;
db1ff28b 6335 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
2268b414
JK
6336
6337 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6338 return 0;
6339
6340 gdb_assert (num_phdr < 100); /* Basic sanity check. */
224c3ddb 6341 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
2268b414
JK
6342
6343 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6344 return 0;
6345
6346 /* Compute relocation: it is expected to be 0 for "regular" executables,
6347 non-zero for PIE ones. */
6348 relocation = -1;
db1ff28b
JK
6349 for (i = 0; relocation == -1 && i < num_phdr; i++)
6350 if (is_elf64)
6351 {
6352 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6353
6354 if (p->p_type == PT_PHDR)
6355 relocation = phdr_memaddr - p->p_vaddr;
6356 }
6357 else
6358 {
6359 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6360
6361 if (p->p_type == PT_PHDR)
6362 relocation = phdr_memaddr - p->p_vaddr;
6363 }
6364
2268b414
JK
6365 if (relocation == -1)
6366 {
e237a7e2
JK
6367 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6368 any real world executables, including PIE executables, have always
6369 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6370 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6371 or present DT_DEBUG anyway (fpc binaries are statically linked).
6372
6373 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6374
6375 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6376
2268b414
JK
6377 return 0;
6378 }
6379
db1ff28b
JK
6380 for (i = 0; i < num_phdr; i++)
6381 {
6382 if (is_elf64)
6383 {
6384 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6385
6386 if (p->p_type == PT_DYNAMIC)
6387 return p->p_vaddr + relocation;
6388 }
6389 else
6390 {
6391 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
2268b414 6392
db1ff28b
JK
6393 if (p->p_type == PT_DYNAMIC)
6394 return p->p_vaddr + relocation;
6395 }
6396 }
2268b414
JK
6397
6398 return 0;
6399}
6400
6401/* Return &_r_debug in the inferior, or -1 if not present. Return value
367ba2c2
MR
6402 can be 0 if the inferior does not yet have the library list initialized.
6403 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6404 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
2268b414
JK
6405
6406static CORE_ADDR
6407get_r_debug (const int pid, const int is_elf64)
6408{
6409 CORE_ADDR dynamic_memaddr;
6410 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6411 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
367ba2c2 6412 CORE_ADDR map = -1;
2268b414
JK
6413
6414 dynamic_memaddr = get_dynamic (pid, is_elf64);
6415 if (dynamic_memaddr == 0)
367ba2c2 6416 return map;
2268b414
JK
6417
6418 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6419 {
6420 if (is_elf64)
6421 {
6422 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
a738da3a 6423#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
367ba2c2
MR
6424 union
6425 {
6426 Elf64_Xword map;
6427 unsigned char buf[sizeof (Elf64_Xword)];
6428 }
6429 rld_map;
a738da3a
MF
6430#endif
6431#ifdef DT_MIPS_RLD_MAP
367ba2c2
MR
6432 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6433 {
6434 if (linux_read_memory (dyn->d_un.d_val,
6435 rld_map.buf, sizeof (rld_map.buf)) == 0)
6436 return rld_map.map;
6437 else
6438 break;
6439 }
75f62ce7 6440#endif /* DT_MIPS_RLD_MAP */
a738da3a
MF
6441#ifdef DT_MIPS_RLD_MAP_REL
6442 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6443 {
6444 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6445 rld_map.buf, sizeof (rld_map.buf)) == 0)
6446 return rld_map.map;
6447 else
6448 break;
6449 }
6450#endif /* DT_MIPS_RLD_MAP_REL */
2268b414 6451
367ba2c2
MR
6452 if (dyn->d_tag == DT_DEBUG && map == -1)
6453 map = dyn->d_un.d_val;
2268b414
JK
6454
6455 if (dyn->d_tag == DT_NULL)
6456 break;
6457 }
6458 else
6459 {
6460 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
a738da3a 6461#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
367ba2c2
MR
6462 union
6463 {
6464 Elf32_Word map;
6465 unsigned char buf[sizeof (Elf32_Word)];
6466 }
6467 rld_map;
a738da3a
MF
6468#endif
6469#ifdef DT_MIPS_RLD_MAP
367ba2c2
MR
6470 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6471 {
6472 if (linux_read_memory (dyn->d_un.d_val,
6473 rld_map.buf, sizeof (rld_map.buf)) == 0)
6474 return rld_map.map;
6475 else
6476 break;
6477 }
75f62ce7 6478#endif /* DT_MIPS_RLD_MAP */
a738da3a
MF
6479#ifdef DT_MIPS_RLD_MAP_REL
6480 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6481 {
6482 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6483 rld_map.buf, sizeof (rld_map.buf)) == 0)
6484 return rld_map.map;
6485 else
6486 break;
6487 }
6488#endif /* DT_MIPS_RLD_MAP_REL */
2268b414 6489
367ba2c2
MR
6490 if (dyn->d_tag == DT_DEBUG && map == -1)
6491 map = dyn->d_un.d_val;
2268b414
JK
6492
6493 if (dyn->d_tag == DT_NULL)
6494 break;
6495 }
6496
6497 dynamic_memaddr += dyn_size;
6498 }
6499
367ba2c2 6500 return map;
2268b414
JK
6501}
6502
6503/* Read one pointer from MEMADDR in the inferior. */
6504
6505static int
6506read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6507{
485f1ee4
PA
6508 int ret;
6509
6510 /* Go through a union so this works on either big or little endian
6511 hosts, when the inferior's pointer size is smaller than the size
6512 of CORE_ADDR. It is assumed the inferior's endianness is the
6513 same of the superior's. */
6514 union
6515 {
6516 CORE_ADDR core_addr;
6517 unsigned int ui;
6518 unsigned char uc;
6519 } addr;
6520
6521 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6522 if (ret == 0)
6523 {
6524 if (ptr_size == sizeof (CORE_ADDR))
6525 *ptr = addr.core_addr;
6526 else if (ptr_size == sizeof (unsigned int))
6527 *ptr = addr.ui;
6528 else
6529 gdb_assert_not_reached ("unhandled pointer size");
6530 }
6531 return ret;
2268b414
JK
6532}
6533
974387bb
TBA
6534bool
6535linux_process_target::supports_qxfer_libraries_svr4 ()
6536{
6537 return true;
6538}
6539
2268b414
JK
6540struct link_map_offsets
6541 {
6542 /* Offset and size of r_debug.r_version. */
6543 int r_version_offset;
6544
6545 /* Offset and size of r_debug.r_map. */
6546 int r_map_offset;
6547
8d56636a
MM
6548 /* Offset of r_debug_extended.r_next. */
6549 int r_next_offset;
6550
2268b414
JK
6551 /* Offset to l_addr field in struct link_map. */
6552 int l_addr_offset;
6553
6554 /* Offset to l_name field in struct link_map. */
6555 int l_name_offset;
6556
6557 /* Offset to l_ld field in struct link_map. */
6558 int l_ld_offset;
6559
6560 /* Offset to l_next field in struct link_map. */
6561 int l_next_offset;
6562
6563 /* Offset to l_prev field in struct link_map. */
6564 int l_prev_offset;
6565 };
6566
8d56636a
MM
6567static const link_map_offsets lmo_32bit_offsets =
6568 {
6569 0, /* r_version offset. */
6570 4, /* r_debug.r_map offset. */
6571 20, /* r_debug_extended.r_next. */
6572 0, /* l_addr offset in link_map. */
6573 4, /* l_name offset in link_map. */
6574 8, /* l_ld offset in link_map. */
6575 12, /* l_next offset in link_map. */
6576 16 /* l_prev offset in link_map. */
6577 };
6578
/* Field offsets for a 64-bit (LP64) inferior.  */

static const link_map_offsets lmo_64bit_offsets =
  {
    0,	/* r_version offset.  */
    8,	/* r_debug.r_map offset.  */
    40,	/* r_debug_extended.r_next.  */
    0,	/* l_addr offset in link_map.  */
    8,	/* l_name offset in link_map.  */
    16,	/* l_ld offset in link_map.  */
    24,	/* l_next offset in link_map.  */
    32	/* l_prev offset in link_map.  */
  };
6590
6591/* Get the loaded shared libraries from one namespace. */
6592
6593static void
2733d9d5
MM
6594read_link_map (std::string &document, CORE_ADDR lmid, CORE_ADDR lm_addr,
6595 CORE_ADDR lm_prev, int ptr_size, const link_map_offsets *lmo)
8d56636a
MM
6596{
6597 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6598
6599 while (lm_addr
6600 && read_one_ptr (lm_addr + lmo->l_name_offset,
6601 &l_name, ptr_size) == 0
6602 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6603 &l_addr, ptr_size) == 0
6604 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6605 &l_ld, ptr_size) == 0
6606 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6607 &l_prev, ptr_size) == 0
6608 && read_one_ptr (lm_addr + lmo->l_next_offset,
6609 &l_next, ptr_size) == 0)
6610 {
6611 unsigned char libname[PATH_MAX];
6612
6613 if (lm_prev != l_prev)
6614 {
6615 warning ("Corrupted shared library list: 0x%s != 0x%s",
6616 paddress (lm_prev), paddress (l_prev));
6617 break;
6618 }
6619
ad10f44e
MM
6620 /* Not checking for error because reading may stop before we've got
6621 PATH_MAX worth of characters. */
6622 libname[0] = '\0';
6623 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6624 libname[sizeof (libname) - 1] = '\0';
6625 if (libname[0] != '\0')
8d56636a 6626 {
ad10f44e 6627 string_appendf (document, "<library name=\"");
de75275f 6628 xml_escape_text_append (document, (char *) libname);
ad10f44e 6629 string_appendf (document, "\" lm=\"0x%s\" l_addr=\"0x%s\" "
2733d9d5 6630 "l_ld=\"0x%s\" lmid=\"0x%s\"/>",
ad10f44e 6631 paddress (lm_addr), paddress (l_addr),
2733d9d5 6632 paddress (l_ld), paddress (lmid));
8d56636a
MM
6633 }
6634
6635 lm_prev = lm_addr;
6636 lm_addr = l_next;
6637 }
6638}
6639
/* Construct qXfer:libraries-svr4:read reply.

   ANNEX is a semicolon-separated list of name=hexaddr pairs; the names
   recognized are "lmid", "start" and "prev" (unknown names are skipped).
   On success, copies up to LEN bytes of the XML document, starting at
   OFFSET, into READBUF and returns the number of bytes copied.  Returns
   -1 on error and -2 if writing was requested (WRITEBUF non-NULL),
   which this object does not support.  */

int
linux_process_target::qxfer_libraries_svr4 (const char *annex,
					    unsigned char *readbuf,
					    unsigned const char *writebuf,
					    CORE_ADDR offset, int len)
{
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;
  unsigned int machine;
  CORE_ADDR lmid = 0, lm_addr = 0, lm_prev = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Determine the inferior's pointer size from its executable image.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  const link_map_offsets *lmo;
  int ptr_size;
  if (is_elf64)
    {
      lmo = &lmo_64bit_offsets;
      ptr_size = 8;
    }
  else
    {
      lmo = &lmo_32bit_offsets;
      ptr_size = 4;
    }

  /* Parse the "name=hexaddr;..." pairs from the annex; unrecognized
     names are skipped over rather than rejected.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int name_len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      name_len = sep - annex;
      if (name_len == 4 && startswith (annex, "lmid"))
	addrp = &lmid;
      else if (name_len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (name_len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  /* Unknown name: skip to the next ';'-delimited pair.  */
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  /* Note: the '>' terminating this opening tag is appended later, once
     we know whether a main-lm attribute is needed.  */
  std::string document = "<library-list-svr4 version=\"1.0\"";

  /* When the starting LM_ADDR is passed in the annex, only traverse that
     namespace, which is assumed to be identified by LMID.

     Otherwise, start with R_DEBUG and traverse all namespaces we find.  */
  if (lm_addr != 0)
    {
      document += ">";
      read_link_map (document, lmid, lm_addr, lm_prev, ptr_size, lmo);
    }
  else
    {
      if (lm_prev != 0)
	warning ("ignoring prev=0x%s without start", paddress (lm_prev));

      /* We could interpret LMID as 'provide only the libraries for this
	 namespace' but GDB is currently only providing lmid, start, and
	 prev, or nothing.  */
      if (lmid != 0)
	warning ("ignoring lmid=0x%s without start", paddress (lmid));

      /* R_DEBUG is cached per-process once found.  */
      CORE_ADDR r_debug = priv->r_debug;
      if (r_debug == 0)
	r_debug = priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (r_debug == (CORE_ADDR) -1)
	return -1;

      /* Terminate the header if we end up with an empty list.  */
      if (r_debug == 0)
	document += ">";

      /* Walk the chain of r_debug structures (one per namespace,
	 linked by r_debug_extended.r_next when r_version >= 2).  */
      while (r_debug != 0)
	{
	  int r_version = 0;
	  if (linux_read_memory (r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0)
	    {
	      warning ("unable to read r_version from 0x%s",
		       paddress (r_debug + lmo->r_version_offset));
	      break;
	    }

	  if (r_version < 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	      break;
	    }

	  if (read_one_ptr (r_debug + lmo->r_map_offset, &lm_addr,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%s",
		       paddress (r_debug + lmo->r_map_offset));
	      break;
	    }

	  /* We read the entire namespace.  */
	  lm_prev = 0;

	  /* The first entry corresponds to the main executable unless the
	     dynamic loader was loaded late by a static executable.  But
	     in such case the main executable does not have PT_DYNAMIC
	     present and we would not have gotten here.  */
	  if (r_debug == priv->r_debug)
	    {
	      if (lm_addr != 0)
		string_appendf (document, " main-lm=\"0x%s\">",
				paddress (lm_addr));
	      else
		document += ">";

	      lm_prev = lm_addr;
	      if (read_one_ptr (lm_addr + lmo->l_next_offset,
				&lm_addr, ptr_size) != 0)
		{
		  warning ("unable to read l_next from 0x%s",
			   paddress (lm_addr + lmo->l_next_offset));
		  break;
		}
	    }

	  read_link_map (document, r_debug, lm_addr, lm_prev, ptr_size, lmo);

	  /* r_next only exists in r_debug_extended (version >= 2).  */
	  if (r_version < 2)
	    break;

	  if (read_one_ptr (r_debug + lmo->r_next_offset, &r_debug,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_next from 0x%s",
		       paddress (r_debug + lmo->r_next_offset));
	      break;
	    }
	}
    }

  document += "</library-list-svr4>";

  /* Serve the [OFFSET, OFFSET+LEN) window of the document.  */
  int document_len = document.length ();
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document.data () + offset, len);

  return len;
}
6820
9accd112
MM
6821#ifdef HAVE_LINUX_BTRACE
6822
8263b346
TBA
/* Return true: branch tracing is supported when HAVE_LINUX_BTRACE
   is defined (this definition is inside that #ifdef).  */

bool
linux_process_target::supports_btrace ()
{
  return true;
}
6828
/* Enable branch tracing for thread TP with configuration CONF;
   thin wrapper around nat/linux-btrace's linux_enable_btrace.  */

btrace_target_info *
linux_process_target::enable_btrace (thread_info *tp,
				     const btrace_config *conf)
{
  return linux_enable_btrace (tp->id, conf);
}
6835
969c39fb 6836/* See to_disable_btrace target method. */
9accd112 6837
79597bdd
TBA
6838int
6839linux_process_target::disable_btrace (btrace_target_info *tinfo)
969c39fb
MM
6840{
6841 enum btrace_error err;
6842
6843 err = linux_disable_btrace (tinfo);
6844 return (err == BTRACE_ERR_NONE ? 0 : -1);
6845}
6846
bc504a31 6847/* Encode an Intel Processor Trace configuration. */
b20a6524
MM
6848
6849static void
873a185b 6850linux_low_encode_pt_config (std::string *buffer,
b20a6524
MM
6851 const struct btrace_data_pt_config *config)
6852{
873a185b 6853 *buffer += "<pt-config>\n";
b20a6524
MM
6854
6855 switch (config->cpu.vendor)
6856 {
6857 case CV_INTEL:
873a185b
TT
6858 string_xml_appendf (*buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6859 "model=\"%u\" stepping=\"%u\"/>\n",
6860 config->cpu.family, config->cpu.model,
6861 config->cpu.stepping);
b20a6524
MM
6862 break;
6863
6864 default:
6865 break;
6866 }
6867
873a185b 6868 *buffer += "</pt-config>\n";
b20a6524
MM
6869}
6870
6871/* Encode a raw buffer. */
6872
6873static void
873a185b 6874linux_low_encode_raw (std::string *buffer, const gdb_byte *data,
b20a6524
MM
6875 unsigned int size)
6876{
6877 if (size == 0)
6878 return;
6879
268a13a5 6880 /* We use hex encoding - see gdbsupport/rsp-low.h. */
873a185b 6881 *buffer += "<raw>\n";
b20a6524
MM
6882
6883 while (size-- > 0)
6884 {
6885 char elem[2];
6886
6887 elem[0] = tohex ((*data >> 4) & 0xf);
6888 elem[1] = tohex (*data++ & 0xf);
6889
8b2d5ef8 6890 buffer->append (elem, 2);
b20a6524
MM
6891 }
6892
873a185b 6893 *buffer += "</raw>\n";
b20a6524
MM
6894}
6895
/* See to_read_btrace target method.

   Reads branch trace data of kind TYPE from TINFO and appends its XML
   rendering to BUFFER.  Returns 0 on success; on failure appends an
   "E.<message>" error string instead and returns -1.  */

int
linux_process_target::read_btrace (btrace_target_info *tinfo,
				   std::string *buffer,
				   enum btrace_read_type type)
{
  struct btrace_data btrace;
  enum btrace_error err;

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
	*buffer += "E.Overflow.";
      else
	*buffer += "E.Generic Error.";

      return -1;
    }

  /* Render the trace according to its format.  */
  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      *buffer += "E.No Trace.";
      return -1;

    case BTRACE_FORMAT_BTS:
      /* BTS: one <block> element per recorded branch block.  */
      *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
      *buffer += "<btrace version=\"1.0\">\n";

      for (const btrace_block &block : *btrace.variant.bts.blocks)
	string_xml_appendf (*buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			    paddress (block.begin), paddress (block.end));

      *buffer += "</btrace>\n";
      break;

    case BTRACE_FORMAT_PT:
      /* Intel PT: emit the CPU configuration followed by the raw
	 hex-encoded trace payload.  */
      *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
      *buffer += "<btrace version=\"1.0\">\n";
      *buffer += "<pt>\n";

      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      *buffer += "</pt>\n";
      *buffer += "</btrace>\n";
      break;

    default:
      *buffer += "E.Unsupported Trace Format.";
      return -1;
    }

  return 0;
}
f4abbc16
MM
6955
6956/* See to_btrace_conf target method. */
6957
79597bdd
TBA
6958int
6959linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
873a185b 6960 std::string *buffer)
f4abbc16
MM
6961{
6962 const struct btrace_config *conf;
6963
873a185b
TT
6964 *buffer += "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n";
6965 *buffer += "<btrace-conf version=\"1.0\">\n";
f4abbc16
MM
6966
6967 conf = linux_btrace_conf (tinfo);
6968 if (conf != NULL)
6969 {
6970 switch (conf->format)
6971 {
6972 case BTRACE_FORMAT_NONE:
6973 break;
6974
6975 case BTRACE_FORMAT_BTS:
873a185b
TT
6976 string_xml_appendf (*buffer, "<bts");
6977 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->bts.size);
6978 string_xml_appendf (*buffer, " />\n");
f4abbc16 6979 break;
b20a6524
MM
6980
6981 case BTRACE_FORMAT_PT:
873a185b
TT
6982 string_xml_appendf (*buffer, "<pt");
6983 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->pt.size);
6984 string_xml_appendf (*buffer, "/>\n");
b20a6524 6985 break;
f4abbc16
MM
6986 }
6987 }
6988
873a185b 6989 *buffer += "</btrace-conf>\n";
f4abbc16
MM
6990 return 0;
6991}
9accd112
MM
6992#endif /* HAVE_LINUX_BTRACE */
6993
7b669087
GB
/* See nat/linux-nat.h.  Return the ptid of the current thread.  */

ptid_t
current_lwp_ptid (void)
{
  return ptid_of (current_thread);
}
7001
7f63b89b
TBA
/* Return the name of thread THREAD, read from /proc via
   linux_proc_tid_get_name.  */

const char *
linux_process_target::thread_name (ptid_t thread)
{
  return linux_proc_tid_get_name (thread);
}
7007
#if USE_THREAD_DB
/* Fetch the libthread_db handle for PTID into *HANDLE/*HANDLE_LEN;
   forwards to thread-db.cc.  Only available with thread_db support.  */
bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  return thread_db_thread_handle (ptid, handle, handle_len);
}
#endif
7016
7b961964
SM
7017thread_info *
7018linux_process_target::thread_pending_parent (thread_info *thread)
7019{
7020 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
7021
7022 if (parent == nullptr)
7023 return nullptr;
7024
7025 return get_lwp_thread (parent);
7026}
7027
df5ad102 7028thread_info *
faf44a31
PA
7029linux_process_target::thread_pending_child (thread_info *thread,
7030 target_waitkind *kind)
df5ad102 7031{
faf44a31 7032 lwp_info *child = get_thread_lwp (thread)->pending_child (kind);
df5ad102
SM
7033
7034 if (child == nullptr)
7035 return nullptr;
7036
7037 return get_lwp_thread (child);
7038}
7039
276d4552
YQ
7040/* Default implementation of linux_target_ops method "set_pc" for
7041 32-bit pc register which is literally named "pc". */
7042
7043void
7044linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7045{
7046 uint32_t newpc = pc;
7047
7048 supply_register_by_name (regcache, "pc", &newpc);
7049}
7050
7051/* Default implementation of linux_target_ops method "get_pc" for
7052 32-bit pc register which is literally named "pc". */
7053
7054CORE_ADDR
7055linux_get_pc_32bit (struct regcache *regcache)
7056{
7057 uint32_t pc;
7058
7059 collect_register_by_name (regcache, "pc", &pc);
c058728c 7060 threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
276d4552
YQ
7061 return pc;
7062}
7063
6f69e520
YQ
7064/* Default implementation of linux_target_ops method "set_pc" for
7065 64-bit pc register which is literally named "pc". */
7066
7067void
7068linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7069{
7070 uint64_t newpc = pc;
7071
7072 supply_register_by_name (regcache, "pc", &newpc);
7073}
7074
7075/* Default implementation of linux_target_ops method "get_pc" for
7076 64-bit pc register which is literally named "pc". */
7077
7078CORE_ADDR
7079linux_get_pc_64bit (struct regcache *regcache)
7080{
7081 uint64_t pc;
7082
7083 collect_register_by_name (regcache, "pc", &pc);
c058728c 7084 threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
6f69e520
YQ
7085 return pc;
7086}
7087
0570503d 7088/* See linux-low.h. */
974c89e0 7089
0570503d 7090int
43e5fbd8 7091linux_get_auxv (int pid, int wordsize, CORE_ADDR match, CORE_ADDR *valp)
974c89e0
AH
7092{
7093 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7094 int offset = 0;
7095
7096 gdb_assert (wordsize == 4 || wordsize == 8);
7097
43e5fbd8
TJB
7098 while (the_target->read_auxv (pid, offset, data, 2 * wordsize)
7099 == 2 * wordsize)
974c89e0
AH
7100 {
7101 if (wordsize == 4)
7102 {
0570503d 7103 uint32_t *data_p = (uint32_t *) data;
974c89e0 7104 if (data_p[0] == match)
0570503d
PFC
7105 {
7106 *valp = data_p[1];
7107 return 1;
7108 }
974c89e0
AH
7109 }
7110 else
7111 {
0570503d 7112 uint64_t *data_p = (uint64_t *) data;
974c89e0 7113 if (data_p[0] == match)
0570503d
PFC
7114 {
7115 *valp = data_p[1];
7116 return 1;
7117 }
974c89e0
AH
7118 }
7119
7120 offset += 2 * wordsize;
7121 }
7122
7123 return 0;
7124}
7125
7126/* See linux-low.h. */
7127
7128CORE_ADDR
43e5fbd8 7129linux_get_hwcap (int pid, int wordsize)
974c89e0 7130{
0570503d 7131 CORE_ADDR hwcap = 0;
43e5fbd8 7132 linux_get_auxv (pid, wordsize, AT_HWCAP, &hwcap);
0570503d 7133 return hwcap;
974c89e0
AH
7134}
7135
7136/* See linux-low.h. */
7137
7138CORE_ADDR
43e5fbd8 7139linux_get_hwcap2 (int pid, int wordsize)
974c89e0 7140{
0570503d 7141 CORE_ADDR hwcap2 = 0;
43e5fbd8 7142 linux_get_auxv (pid, wordsize, AT_HWCAP2, &hwcap2);
0570503d 7143 return hwcap2;
974c89e0 7144}
6f69e520 7145
3aee8918
PA
#ifdef HAVE_LINUX_REGSETS
/* Count the entries of INFO's regsets table (terminated by an entry
   with a negative size) and record the count in INFO->num_regsets.  */
void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  while (info->regsets[count].size >= 0)
    count++;

  info->num_regsets = count;
}
#endif
7156
da6d8c04
DJ
/* One-time initialization of the Linux low target: install the target
   vector, emit any ptrace/proc capability warnings, install the SIGCHLD
   handler used to wake the event loop, and run per-architecture and
   ptrace-feature setup.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (the_linux_target);

  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();

  /* SA_RESTART so interrupted syscalls are restarted after the
     SIGCHLD handler runs.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  linux_check_ptrace_features ();
}