]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdbserver/linux-low.cc
Don't resume new threads if scheduler-locking is in effect
[thirdparty/binutils-gdb.git] / gdbserver / linux-low.cc
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
213516ef 2 Copyright (C) 1995-2023 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
268a13a5 22#include "gdbsupport/agent.h"
de0d863e 23#include "tdesc.h"
cdc8e9b2
JB
24#include "gdbsupport/event-loop.h"
25#include "gdbsupport/event-pipe.h"
268a13a5
TT
26#include "gdbsupport/rsp-low.h"
27#include "gdbsupport/signals-state-save-restore.h"
96d7229d
LM
28#include "nat/linux-nat.h"
29#include "nat/linux-waitpid.h"
268a13a5 30#include "gdbsupport/gdb_wait.h"
5826e159 31#include "nat/gdb_ptrace.h"
125f8a3d
GB
32#include "nat/linux-ptrace.h"
33#include "nat/linux-procfs.h"
8cc73a39 34#include "nat/linux-personality.h"
da6d8c04
DJ
35#include <signal.h>
36#include <sys/ioctl.h>
37#include <fcntl.h>
0a30fbc4 38#include <unistd.h>
fd500816 39#include <sys/syscall.h>
f9387fc3 40#include <sched.h>
07e059b5
VP
41#include <ctype.h>
42#include <pwd.h>
43#include <sys/types.h>
44#include <dirent.h>
53ce3c39 45#include <sys/stat.h>
efcbbd14 46#include <sys/vfs.h>
1570b33e 47#include <sys/uio.h>
268a13a5 48#include "gdbsupport/filestuff.h"
c144c7a0 49#include "tracepoint.h"
276d4552 50#include <inttypes.h>
268a13a5 51#include "gdbsupport/common-inferior.h"
2090129c 52#include "nat/fork-inferior.h"
268a13a5 53#include "gdbsupport/environ.h"
21987b9c 54#include "gdbsupport/gdb-sigmask.h"
268a13a5 55#include "gdbsupport/scoped_restore.h"
957f3f49
DE
56#ifndef ELFMAG0
57/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
58 then ELFMAG0 will have been defined. If it didn't get included by
59 gdb_proc_service.h then including it will likely introduce a duplicate
60 definition of elf_fpregset_t. */
61#include <elf.h>
62#endif
14d2069a 63#include "nat/linux-namespaces.h"
efcbbd14 64
fd462a61
DJ
65#ifndef O_LARGEFILE
66#define O_LARGEFILE 0
67#endif
1a981360 68
69f4c9cc
AH
69#ifndef AT_HWCAP2
70#define AT_HWCAP2 26
71#endif
72
db0dfaa0
LM
73/* Some targets did not define these ptrace constants from the start,
74 so gdbserver defines them locally here. In the future, these may
75 be removed after they are added to asm/ptrace.h. */
76#if !(defined(PT_TEXT_ADDR) \
77 || defined(PT_DATA_ADDR) \
78 || defined(PT_TEXT_END_ADDR))
79#if defined(__mcoldfire__)
80/* These are still undefined in 3.10 kernels. */
81#define PT_TEXT_ADDR 49*4
82#define PT_DATA_ADDR 50*4
83#define PT_TEXT_END_ADDR 51*4
db0dfaa0
LM
84/* These are still undefined in 3.10 kernels. */
85#elif defined(__TMS320C6X__)
86#define PT_TEXT_ADDR (0x10000*4)
87#define PT_DATA_ADDR (0x10004*4)
88#define PT_TEXT_END_ADDR (0x10008*4)
89#endif
90#endif
91
5203ae1e
TBA
92#if (defined(__UCLIBC__) \
93 && defined(HAS_NOMMU) \
94 && defined(PT_TEXT_ADDR) \
95 && defined(PT_DATA_ADDR) \
96 && defined(PT_TEXT_END_ADDR))
97#define SUPPORTS_READ_OFFSETS
98#endif
99
9accd112 100#ifdef HAVE_LINUX_BTRACE
125f8a3d 101# include "nat/linux-btrace.h"
268a13a5 102# include "gdbsupport/btrace-common.h"
9accd112
MM
103#endif
104
8365dcf5
TJB
105#ifndef HAVE_ELF32_AUXV_T
106/* Copied from glibc's elf.h. */
107typedef struct
108{
109 uint32_t a_type; /* Entry type */
110 union
111 {
112 uint32_t a_val; /* Integer value */
113 /* We use to have pointer elements added here. We cannot do that,
114 though, since it does not work when using 32-bit definitions
115 on 64-bit platforms and vice versa. */
116 } a_un;
117} Elf32_auxv_t;
118#endif
119
120#ifndef HAVE_ELF64_AUXV_T
121/* Copied from glibc's elf.h. */
122typedef struct
123{
124 uint64_t a_type; /* Entry type */
125 union
126 {
127 uint64_t a_val; /* Integer value */
128 /* We use to have pointer elements added here. We cannot do that,
129 though, since it does not work when using 32-bit definitions
130 on 64-bit platforms and vice versa. */
131 } a_un;
132} Elf64_auxv_t;
133#endif
134
ded48a5e
YQ
135/* Does the current host support PTRACE_GETREGSET? */
136int have_ptrace_getregset = -1;
137
8a841a35
PA
138/* Return TRUE if THREAD is the leader thread of the process. */
139
140static bool
141is_leader (thread_info *thread)
142{
143 ptid_t ptid = ptid_of (thread);
144 return ptid.pid () == ptid.lwp ();
145}
146
48989498
PA
147/* Return true if we should report thread exit events to GDB, for
148 THR. */
149
150static bool
151report_exit_events_for (thread_info *thr)
152{
153 client_state &cs = get_client_state ();
154
155 return (cs.report_thread_events
156 || (thr->thread_options & GDB_THREAD_OPTION_EXIT) != 0);
157}
158
cff068da
GB
159/* LWP accessors. */
160
161/* See nat/linux-nat.h. */
162
163ptid_t
164ptid_of_lwp (struct lwp_info *lwp)
165{
166 return ptid_of (get_lwp_thread (lwp));
167}
168
169/* See nat/linux-nat.h. */
170
4b134ca1
GB
171void
172lwp_set_arch_private_info (struct lwp_info *lwp,
173 struct arch_lwp_info *info)
174{
175 lwp->arch_private = info;
176}
177
178/* See nat/linux-nat.h. */
179
180struct arch_lwp_info *
181lwp_arch_private_info (struct lwp_info *lwp)
182{
183 return lwp->arch_private;
184}
185
186/* See nat/linux-nat.h. */
187
cff068da
GB
188int
189lwp_is_stopped (struct lwp_info *lwp)
190{
191 return lwp->stopped;
192}
193
194/* See nat/linux-nat.h. */
195
196enum target_stop_reason
197lwp_stop_reason (struct lwp_info *lwp)
198{
199 return lwp->stop_reason;
200}
201
0e00e962
AA
202/* See nat/linux-nat.h. */
203
204int
205lwp_is_stepping (struct lwp_info *lwp)
206{
207 return lwp->stepping;
208}
209
05044653
PA
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the singly-linked list of stopped-but-not-yet-claimed
   PIDs; manipulated by add_to_pid_list/pull_pid_from_list below.  */
static struct simple_pid_list *stopped_pids;
05044653
PA
226
227/* Trivial list manipulation functions to keep track of a list of new
228 stopped processes. */
229
230static void
231add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
232{
8d749320 233 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
234
235 new_pid->pid = pid;
236 new_pid->status = status;
237 new_pid->next = *listp;
238 *listp = new_pid;
239}
240
241static int
242pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
243{
244 struct simple_pid_list **p;
245
246 for (p = listp; *p != NULL; p = &(*p)->next)
247 if ((*p)->pid == pid)
248 {
249 struct simple_pid_list *next = (*p)->next;
250
251 *statusp = (*p)->status;
252 xfree (*p);
253 *p = next;
254 return 1;
255 }
256 return 0;
257}
24a09b5f 258
bde24c0a
PA
259enum stopping_threads_kind
260 {
261 /* Not stopping threads presently. */
262 NOT_STOPPING_THREADS,
263
264 /* Stopping threads. */
265 STOPPING_THREADS,
266
267 /* Stopping and suspending threads. */
268 STOPPING_AND_SUSPENDING_THREADS
269 };
270
271/* This is set while stop_all_lwps is in effect. */
6bd434d6 272static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
273
274/* FIXME make into a target method? */
24a09b5f 275int using_threads = 1;
24a09b5f 276
fa593d66
PA
277/* True if we're presently stabilizing threads (moving them out of
278 jump pads). */
279static int stabilizing_threads;
280
f50bf8e5 281static void unsuspend_all_lwps (struct lwp_info *except);
95954743 282static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
00db26fa 283static int lwp_is_marked_dead (struct lwp_info *lwp);
d50171e4 284static int kill_lwp (unsigned long lwpid, int signo);
863d01bd 285static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
ece66d65 286static int linux_low_ptrace_options (int attached);
ced2dffb 287static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
d50171e4 288
582511be
PA
289/* When the event-loop is doing a step-over, this points at the thread
290 being stepped. */
6bd434d6 291static ptid_t step_over_bkpt;
582511be 292
bf9ae9d8
TBA
293bool
294linux_process_target::low_supports_breakpoints ()
295{
296 return false;
297}
d50171e4 298
bf9ae9d8
TBA
299CORE_ADDR
300linux_process_target::low_get_pc (regcache *regcache)
301{
302 return 0;
303}
304
305void
306linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
d50171e4 307{
bf9ae9d8 308 gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
d50171e4 309}
0d62e5e8 310
7582c77c
TBA
311std::vector<CORE_ADDR>
312linux_process_target::low_get_next_pcs (regcache *regcache)
313{
314 gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
315 "implemented");
316}
317
d4807ea2
TBA
318int
319linux_process_target::low_decr_pc_after_break ()
320{
321 return 0;
322}
323
c2d6af84
PA
324/* True if LWP is stopped in its stepping range. */
325
326static int
327lwp_in_step_range (struct lwp_info *lwp)
328{
329 CORE_ADDR pc = lwp->stop_pc;
330
331 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
332}
333
cdc8e9b2
JB
334/* The event pipe registered as a waitable file in the event loop. */
335static event_pipe linux_event_pipe;
bd99dc85
PA
336
337/* True if we're currently in async mode. */
cdc8e9b2 338#define target_is_async_p() (linux_event_pipe.is_open ())
bd99dc85 339
02fc4de7 340static void send_sigstop (struct lwp_info *lwp);
bd99dc85 341
d0722149
DE
/* Inspect the ELF header HEADER and store its e_machine value in
   *MACHINE.  Return 1 if the header describes a 64-bit ELF object,
   0 if it describes a 32-bit one, and -1 (with *MACHINE set to
   EM_NONE) if the magic bytes show it is not an ELF header.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  const unsigned char *ident = header->e_ident;

  if (ident[EI_MAG0] != ELFMAG0
      || ident[EI_MAG1] != ELFMAG1
      || ident[EI_MAG2] != ELFMAG2
      || ident[EI_MAG3] != ELFMAG3)
    {
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return ident[EI_CLASS] == ELFCLASS64;
}
359
360/* Return non-zero if FILE is a 64-bit ELF file,
361 zero if the file is not a 64-bit ELF file,
362 and -1 if the file is not accessible or doesn't exist. */
363
be07f1a2 364static int
214d508e 365elf_64_file_p (const char *file, unsigned int *machine)
d0722149 366{
957f3f49 367 Elf64_Ehdr header;
d0722149
DE
368 int fd;
369
370 fd = open (file, O_RDONLY);
371 if (fd < 0)
372 return -1;
373
374 if (read (fd, &header, sizeof (header)) != sizeof (header))
375 {
376 close (fd);
377 return 0;
378 }
379 close (fd);
380
214d508e 381 return elf_64_header_p (&header, machine);
d0722149
DE
382}
383
be07f1a2
PA
384/* Accepts an integer PID; Returns true if the executable PID is
385 running is a 64-bit ELF file.. */
386
387int
214d508e 388linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 389{
d8d2a3ee 390 char file[PATH_MAX];
be07f1a2
PA
391
392 sprintf (file, "/proc/%d/exe", pid);
214d508e 393 return elf_64_file_p (file, machine);
be07f1a2
PA
394}
395
fd000fb3
TBA
396void
397linux_process_target::delete_lwp (lwp_info *lwp)
bd99dc85 398{
fa96cb38
PA
399 struct thread_info *thr = get_lwp_thread (lwp);
400
c058728c 401 threads_debug_printf ("deleting %ld", lwpid_of (thr));
fa96cb38
PA
402
403 remove_thread (thr);
466eecee 404
fd000fb3 405 low_delete_thread (lwp->arch_private);
466eecee 406
013e3554 407 delete lwp;
bd99dc85
PA
408}
409
fd000fb3
TBA
410void
411linux_process_target::low_delete_thread (arch_lwp_info *info)
412{
413 /* Default implementation should be overridden if architecture-specific
414 info is being used. */
415 gdb_assert (info == nullptr);
416}
95954743 417
421490af
PA
418/* Open the /proc/PID/mem file for PROC. */
419
420static void
421open_proc_mem_file (process_info *proc)
422{
423 gdb_assert (proc->priv->mem_fd == -1);
424
425 char filename[64];
426 xsnprintf (filename, sizeof filename, "/proc/%d/mem", proc->pid);
427
428 proc->priv->mem_fd
429 = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
430}
431
fd000fb3 432process_info *
421490af 433linux_process_target::add_linux_process_no_mem_file (int pid, int attached)
95954743
PA
434{
435 struct process_info *proc;
436
95954743 437 proc = add_process (pid, attached);
8d749320 438 proc->priv = XCNEW (struct process_info_private);
95954743 439
fd000fb3 440 proc->priv->arch_private = low_new_process ();
421490af
PA
441 proc->priv->mem_fd = -1;
442
443 return proc;
444}
445
aa5ca48f 446
421490af
PA
447process_info *
448linux_process_target::add_linux_process (int pid, int attached)
449{
450 process_info *proc = add_linux_process_no_mem_file (pid, attached);
451 open_proc_mem_file (proc);
95954743
PA
452 return proc;
453}
454
f551c8ef
SM
455void
456linux_process_target::remove_linux_process (process_info *proc)
457{
458 if (proc->priv->mem_fd >= 0)
459 close (proc->priv->mem_fd);
460
461 this->low_delete_process (proc->priv->arch_private);
462
463 xfree (proc->priv);
464 proc->priv = nullptr;
465
466 remove_process (proc);
467}
468
fd000fb3
TBA
469arch_process_info *
470linux_process_target::low_new_process ()
471{
472 return nullptr;
473}
474
475void
476linux_process_target::low_delete_process (arch_process_info *info)
477{
478 /* Default implementation must be overridden if architecture-specific
479 info exists. */
480 gdb_assert (info == nullptr);
481}
482
483void
484linux_process_target::low_new_fork (process_info *parent, process_info *child)
485{
486 /* Nop. */
487}
488
797bcff5
TBA
489void
490linux_process_target::arch_setup_thread (thread_info *thread)
94585166 491{
24583e45
TBA
492 scoped_restore_current_thread restore_thread;
493 switch_to_thread (thread);
94585166 494
797bcff5 495 low_arch_setup ();
94585166
DB
496}
497
d16f3f6c
TBA
/* Handle a ptrace extended event (fork/vfork/clone, vfork-done, or
   exec) encoded in WSTAT for the LWP in *ORIG_EVENT_LWP.  Returns 0
   if the event should be reported to GDB (for exec, *ORIG_EVENT_LWP
   is replaced with the post-exec LWP), or 1 if the event was consumed
   internally and must not be reported.  */

int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
					    int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);

  /* The LWP must not already carry a pending waitstatus.  */
  gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (debug_threads)
	{
	  debug_printf ("HEW: Got %s event from LWP %ld, new child is %ld\n",
			(event == PTRACE_EVENT_FORK ? "fork"
			 : event == PTRACE_EVENT_VFORK ? "vfork"
			 : event == PTRACE_EVENT_CLONE ? "clone"
			 : "???"),
			ptid_of (event_thr).lwp (),
			new_pid);
	}

      /* A fork/vfork child is a new process (pid == lwp); a clone
	 child is a new thread of the event thread's process.  */
      ptid_t child_ptid = (event != PTRACE_EVENT_CLONE
			   ? ptid_t (new_pid, new_pid)
			   : ptid_t (ptid_of (event_thr).pid (), new_pid));

      lwp_info *child_lwp = add_lwp (child_ptid);
      gdb_assert (child_lwp != NULL);
      child_lwp->stopped = 1;
      if (event != PTRACE_EVENT_CLONE)
	child_lwp->must_set_ptrace_flags = 1;
      child_lwp->status_pending_p = 0;

      thread_info *child_thr = get_lwp_thread (child_lwp);

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	{
	  threads_debug_printf ("leaving child suspended");
	  child_lwp->suspended = 1;
	}

      if (event_lwp->bp_reinsert != 0
	  && supports_software_single_step ()
	  && event == PTRACE_EVENT_VFORK)
	{
	  /* If we leave single-step breakpoints there, child will
	     hit it, so uninsert single-step breakpoints from parent
	     (and child).  Once vfork child is done, reinsert
	     them back to parent.  */
	  uninsert_single_step_breakpoints (event_thr);
	}

      if (event != PTRACE_EVENT_CLONE)
	{
	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  process_info *child_proc = add_linux_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);

	  process_info *parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  clone_all_breakpoints (child_thr, event_thr);

	  target_desc_up tdesc = allocate_target_description ();
	  copy_target_description (tdesc.get (), parent_proc->tdesc);
	  child_proc->tdesc = tdesc.release ();

	  /* Clone arch-specific process data.  */
	  low_new_fork (parent_proc, child_proc);
	}

      /* Save fork/clone info in the parent thread.  */
      if (event == PTRACE_EVENT_FORK)
	event_lwp->waitstatus.set_forked (child_ptid);
      else if (event == PTRACE_EVENT_VFORK)
	event_lwp->waitstatus.set_vforked (child_ptid);
      else if (event == PTRACE_EVENT_CLONE
	       && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
	event_lwp->waitstatus.set_thread_cloned (child_ptid);

      if (event != PTRACE_EVENT_CLONE
	  || (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
	{
	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled, the
	     handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent's event is passed on to
	     GDB.  */
	  event_lwp->relative = child_lwp;
	  child_lwp->relative = event_lwp;
	}

      /* If the parent thread is doing step-over with single-step
	 breakpoints, the list of single-step breakpoints are cloned
	 from the parent's.  Remove them from the child process.
	 In case of vfork, we'll reinsert them back once vforked
	 child is done.  */
      if (event_lwp->bp_reinsert != 0
	  && supports_software_single_step ())
	{
	  /* The child process is forked and stopped, so it is safe
	     to access its memory without stopping all other threads
	     from other processes.  */
	  delete_single_step_breakpoints (child_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	  gdb_assert (!has_single_step_breakpoints (child_thr));
	}

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.
	 STATUS was filled in either by pull_pid_from_list or by
	 my_waitpid above.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  child_lwp->stop_expected = 1;
	  child_lwp->status_pending_p = 1;
	  child_lwp->status_pending = status;
	}
      else if (event == PTRACE_EVENT_CLONE && cs.report_thread_events)
	{
	  child_lwp->waitstatus.set_thread_created ();
	  child_lwp->status_pending_p = 1;
	  child_lwp->status_pending = status;
	}

      if (event == PTRACE_EVENT_CLONE)
	{
#ifdef USE_THREAD_DB
	  thread_db_notice_clone (event_thr, child_ptid);
#endif
	}

      /* Clone events are only reported when GDB opted in via
	 GDB_THREAD_OPTION_CLONE; otherwise swallow the event.  */
      if (event == PTRACE_EVENT_CLONE
	  && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) == 0)
	{
	  threads_debug_printf
	    ("not reporting clone event from LWP %ld, new child is %ld\n",
	     ptid_of (event_thr).lwp (),
	     new_pid);
	  return 1;
	}

      /* Leave the child stopped until GDB processes the parent
	 event.  */
      child_thr->last_resume_kind = resume_stop;
      child_thr->last_status.set_stopped (GDB_SIGNAL_0);

      /* Report the event.  */
      threads_debug_printf
	("reporting %s event from LWP %ld, new child is %ld\n",
	 (event == PTRACE_EVENT_FORK ? "fork"
	  : event == PTRACE_EVENT_VFORK ? "vfork"
	  : event == PTRACE_EVENT_CLONE ? "clone"
	  : "???"),
	 ptid_of (event_thr).lwp (),
	 new_pid);
      return 0;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.set_vfork_done ();

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
	{
	  /* Re-insert the single-step breakpoints removed when the
	     vfork child was created (see the vfork case above).  */
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      threads_debug_printf ("Got exec event from LWP %ld",
			    lwpid_of (event_thr));

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      switch_to_thread (nullptr);

      /* Create a new process/lwp/thread.  */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.set_execd
	(make_unique_xstrdup
	 (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.set_ignore ();

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (_("unknown ptrace event %d"), event);
}
769
df95181f
TBA
770CORE_ADDR
771linux_process_target::get_pc (lwp_info *lwp)
d50171e4 772{
a9deee17
PA
773 process_info *proc = get_thread_process (get_lwp_thread (lwp));
774 gdb_assert (!proc->starting_up);
d50171e4 775
bf9ae9d8 776 if (!low_supports_breakpoints ())
d50171e4
PA
777 return 0;
778
24583e45
TBA
779 scoped_restore_current_thread restore_thread;
780 switch_to_thread (get_lwp_thread (lwp));
d50171e4 781
a9deee17
PA
782 struct regcache *regcache = get_thread_regcache (current_thread, 1);
783 CORE_ADDR pc = low_get_pc (regcache);
d50171e4 784
c058728c 785 threads_debug_printf ("pc is 0x%lx", (long) pc);
d50171e4 786
d50171e4
PA
787 return pc;
788}
789
9eedd27d
TBA
790void
791linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
82075af2 792{
82075af2
JS
793 struct regcache *regcache;
794
24583e45
TBA
795 scoped_restore_current_thread restore_thread;
796 switch_to_thread (get_lwp_thread (lwp));
82075af2
JS
797
798 regcache = get_thread_regcache (current_thread, 1);
9eedd27d 799 low_get_syscall_trapinfo (regcache, sysno);
82075af2 800
c058728c 801 threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
82075af2
JS
802}
803
9eedd27d
TBA
804void
805linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
806{
807 /* By default, report an unknown system call number. */
808 *sysno = UNKNOWN_SYSCALL;
809}
810
df95181f
TBA
/* Work out why LWP stopped and record it in LWP->stop_reason, backing
   up and recording the stop PC (LWP->stop_pc) as needed.  Returns
   true if the stop PC was handled here, false if the target has no
   breakpoint support (caller must fetch the PC itself).  */

bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  process_info *proc = get_thread_process (get_lwp_thread (lwp));
  if (proc->starting_up)
    {
      /* Claim we have the stop PC so that the caller doesn't try to
	 fetch it itself.  */
      return true;
    }

  pc = get_pc (lwp);
  /* Address of the breakpoint instruction, if the trap was a software
     breakpoint (some archs report PC past the breakpoint).  */
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      threads_debug_printf
	("%s stopped by software breakpoint",
	 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    threads_debug_printf
      ("%s stopped by hardware breakpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    threads_debug_printf
      ("%s stopped by hardware watchpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    threads_debug_printf
      ("%s stopped by trace",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

  lwp->stop_pc = pc;
  return true;
}
ce3a066d 929
fd000fb3
TBA
930lwp_info *
931linux_process_target::add_lwp (ptid_t ptid)
611cb4a5 932{
c360a473 933 lwp_info *lwp = new lwp_info;
0d62e5e8 934
754e3168
AH
935 lwp->thread = add_thread (ptid, lwp);
936
fd000fb3 937 low_new_thread (lwp);
aa5ca48f 938
54a0b537 939 return lwp;
0d62e5e8 940}
611cb4a5 941
fd000fb3
TBA
942void
943linux_process_target::low_new_thread (lwp_info *info)
944{
945 /* Nop. */
946}
947
2090129c
SDJ
/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  Runs in the
   forked child; any failure aborts startup via
   trace_start_error_with_name.  */

static void
linux_ptrace_fun ()
{
  /* Make this process traceable by its parent (gdbserver).  */
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  /* Put the inferior in its own process group.  */
  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      /* open() reuses fd 0 (the lowest free descriptor) for
	 /dev/null, i.e. the inferior's stdin.  */
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
979
/* Start an inferior process and returns its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    /* Optionally disable address-space randomization for the child,
       per the client's request; the scoped object restores the
       original personality when it goes out of scope.  */
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    /* linux_ptrace_fun runs in the child and sets up PTRACE_TRACEME.  */
    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  /* When spawning a new process, we can't open the mem file yet.  We
     still have to nurse the process through the shell, and that execs
     a couple times.  The address space a /proc/PID/mem file is
     accessing is destroyed on exec.  */
  process_info *proc = add_linux_process_no_mem_file (pid, 0);

  ptid = ptid_t (pid, pid);
  new_lwp = add_lwp (ptid);
  /* Ptrace event-reporting options are installed lazily, on the first
     stop (see post_create_inferior).  */
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  /* PROC is now past the shell running the program we want, so we can
     open the /proc/PID/mem file.  */
  open_proc_mem_file (proc);

  return pid;
}
1022
ece66d65
JS
1023/* Implement the post_create_inferior target_ops method. */
1024
6dee9afb
TBA
1025void
1026linux_process_target::post_create_inferior ()
ece66d65
JS
1027{
1028 struct lwp_info *lwp = get_thread_lwp (current_thread);
1029
797bcff5 1030 low_arch_setup ();
ece66d65
JS
1031
1032 if (lwp->must_set_ptrace_flags)
1033 {
1034 struct process_info *proc = current_process ();
1035 int options = linux_low_ptrace_options (proc->attached);
1036
1037 linux_enable_event_reporting (lwpid_of (current_thread), options);
1038 lwp->must_set_ptrace_flags = 0;
1039 }
1040}
1041
/* Attach to the LWP identified by PTID and add it to the LWP list.
   Returns 0 on success, or the errno from PTRACE_ATTACH on failure.  */

int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      threads_debug_printf ("Attached to a stopped process");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
1122
/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true (1) if a new LWP was found (whether
   or not the attach itself succeeded), false (0) if PTID was already
   known.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      threads_debug_printf ("Found new lwp %d", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	threads_debug_printf
	  ("Cannot attach to lwp %d: thread is gone (%d: %s)",
	   lwpid, err, safe_strerror (err));
      else if (err != 0)
	{
	  /* Any other failure is unexpected; report it to the user.  */
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      return 1;
    }
  return 0;
}
1161
500c1d85
PA
1162static void async_file_mark (void);
1163
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  Returns 0 on success; raises an error (and removes
   the half-created process entry) if the initial attach fails.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid);
  int err;

  /* Delay opening the /proc/PID/mem file until we've successfully
     attached.  */
  proc = add_linux_process_no_mem_file (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      /* Undo the process registration before reporting the failure.  */
      this->remove_linux_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  open_proc_mem_file (proc);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid));
  gdb_assert (initial_thread != nullptr);
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      /* Wait for the initial stop of any LWP in the thread group.  */
      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));
      gdb_assert (lwp != nullptr);

      /* Anything other than the expected SIGSTOP must be kept pending
	 so it is reported to GDB later.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
1242
95954743 1243static int
e4eb0dec 1244last_thread_of_process_p (int pid)
95954743 1245{
e4eb0dec 1246 bool seen_one = false;
95954743 1247
da4ae14a 1248 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
95954743 1249 {
e4eb0dec
SM
1250 if (!seen_one)
1251 {
1252 /* This is the first thread of this process we see. */
1253 seen_one = true;
1254 return false;
1255 }
1256 else
1257 {
1258 /* This is the second thread of this process we see. */
1259 return true;
1260 }
1261 });
da6d8c04 1262
e4eb0dec 1263 return thread == NULL;
95954743
PA
1264}
1265
/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      /* Save errno before the debug-print machinery can clobber it.  */
      int save_errno = errno;

      threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }
}
1309
/* Kill LWP and wait for it to die, reaping its wait status so no
   zombie is left behind.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      /* ECHILD with plain waitpid can mean the LWP is a clone child;
	 retry with __WCLONE to reap it.  */
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1351
578290ec 1352/* Callback for `for_each_thread'. Kills an lwp of a given process,
da84f473 1353 except the leader. */
95954743 1354
578290ec
SM
1355static void
1356kill_one_lwp_callback (thread_info *thread, int pid)
da6d8c04 1357{
54a0b537 1358 struct lwp_info *lwp = get_thread_lwp (thread);
0d62e5e8 1359
fd500816
DJ
1360 /* We avoid killing the first thread here, because of a Linux kernel (at
1361 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1362 the children get a chance to be reaped, it will remain a zombie
1363 forever. */
95954743 1364
d86d4aaf 1365 if (lwpid_of (thread) == pid)
95954743 1366 {
c058728c
SM
1367 threads_debug_printf ("is last of process %s",
1368 target_pid_to_str (thread->id).c_str ());
578290ec 1369 return;
95954743 1370 }
fd500816 1371
e76126e8 1372 kill_wait_lwp (lwp);
da6d8c04
DJ
1373}
1374
/* Kill PROCESS and all of its LWPs, reap them, and mourn the process.
   Always returns 0.  */

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  /* Kill every non-leader LWP first (see kill_one_lwp_callback for
     why the leader is skipped).  */
  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    threads_debug_printf ("cannot find lwp for pid: %d", pid);
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1405
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   the host signal number to pass along with PTRACE_DETACH, or 0 if
   nothing should be delivered.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
	  || thread->last_status.sig () == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      threads_debug_printf ("lwp %s had stopped with extended "
			    "status: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  /* Honor GDB's pass/nopass signal disposition when it is known.  */
  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      threads_debug_printf ("lwp %s had signal %s, "
			    "but we don't know if we should pass it. "
			    "Default to not.",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      threads_debug_printf ("lwp %s has pending signal %s: delivering it",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
1481
/* Detach from a single LWP, delivering any signal it stopped for, and
   delete its lwp_info.  Handles the LWP having become a zombie behind
   our back (reaps it in that case).  */

void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      threads_debug_printf ("Sending SIGCONT to %s",
			    target_pid_to_str (ptid_of (thread)).c_str ());

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)).c_str (),
		 safe_strerror (save_errno));
	}
    }
  else
    threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
			  target_pid_to_str (ptid_of (thread)).c_str (),
			  strsignal (sig));

  delete_lwp (lwp);
}
1560
/* Detach from PROCESS and all of its LWPs, then mourn it.  Always
   returns 0.  */

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  /* Now the leader itself.  */
  main_lwp = find_lwp_pid (ptid_t (process->pid));
  gdb_assert (main_lwp != nullptr);
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1610
1611/* Remove all LWPs that belong to process PROC from the lwp list. */
1612
8adb37b9
TBA
1613void
1614linux_process_target::mourn (process_info *process)
8336d594 1615{
8336d594
PA
1616#ifdef USE_THREAD_DB
1617 thread_db_mourn (process);
1618#endif
1619
fd000fb3 1620 for_each_thread (process->pid, [this] (thread_info *thread)
6b2a85da
SM
1621 {
1622 delete_lwp (get_thread_lwp (thread));
1623 });
f9e39928 1624
f551c8ef 1625 this->remove_linux_process (process);
8336d594
PA
1626}
1627
95a49a39
TBA
1628void
1629linux_process_target::join (int pid)
444d6139 1630{
444d6139
PA
1631 int status, ret;
1632
1633 do {
d105de22 1634 ret = my_waitpid (pid, &status, 0);
444d6139
PA
1635 if (WIFEXITED (status) || WIFSIGNALED (status))
1636 break;
1637 } while (ret != -1 || errno != ECHILD);
1638}
1639
13d3d99b
TBA
1640/* Return true if the given thread is still alive. */
1641
1642bool
1643linux_process_target::thread_alive (ptid_t ptid)
0d62e5e8 1644{
95954743
PA
1645 struct lwp_info *lwp = find_lwp_pid (ptid);
1646
1647 /* We assume we always know if a thread exits. If a whole process
1648 exited but we still haven't been able to report it to GDB, we'll
1649 hold on to the last lwp of the dead process. */
1650 if (lwp != NULL)
00db26fa 1651 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1652 else
1653 return 0;
1654}
1655
/* Return true if THREAD's pending stop event is still valid to report
   to GDB.  A pending SW/HW breakpoint stop may have become stale if
   the thread's PC changed (or, without SIGTRAP siginfo, if the
   breakpoint itself was removed); in that case the pending status is
   discarded.  */

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* Only breakpoint stops of threads GDB wants running can go stale;
     an explicit stop request is always reportable.  */
  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      /* Breakpoint checks below operate on the current thread.  */
      scoped_restore_current_thread restore_thread;
      switch_to_thread (thread);

      if (pc != lp->stop_pc)
	{
	  threads_debug_printf ("PC of %ld changed",
				lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      /* Without siginfo to confirm the trap source, also verify the
	 breakpoint still exists at the stop PC.  */
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !low_breakpoint_at (pc))
	{
	  threads_debug_printf ("previous SW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  threads_debug_printf ("previous HW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
#endif

      if (discard)
	{
	  threads_debug_printf ("discarding pending breakpoint status");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1712
a681f9c9
PA
1713/* Returns true if LWP is resumed from the client's perspective. */
1714
1715static int
1716lwp_resumed (struct lwp_info *lwp)
1717{
1718 struct thread_info *thread = get_lwp_thread (lwp);
1719
1720 if (thread->last_resume_kind != resume_stop)
1721 return 1;
1722
1723 /* Did gdb send us a `vCont;t', but we haven't reported the
1724 corresponding stop to gdb yet? If so, the thread is still
1725 resumed/running from gdb's perspective. */
1726 if (thread->last_resume_kind == resume_stop
183be222 1727 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
a681f9c9
PA
1728 return 1;
1729
1730 return 0;
1731}
1732
df95181f
TBA
1733bool
1734linux_process_target::status_pending_p_callback (thread_info *thread,
1735 ptid_t ptid)
0d62e5e8 1736{
582511be 1737 struct lwp_info *lp = get_thread_lwp (thread);
95954743
PA
1738
1739 /* Check if we're only interested in events from a specific process
afa8d396 1740 or a specific LWP. */
83e1b6c1 1741 if (!thread->id.matches (ptid))
95954743 1742 return 0;
0d62e5e8 1743
a681f9c9
PA
1744 if (!lwp_resumed (lp))
1745 return 0;
1746
582511be 1747 if (lp->status_pending_p
df95181f 1748 && !thread_still_has_status_pending (thread))
582511be 1749 {
df95181f 1750 resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
582511be
PA
1751 return 0;
1752 }
0d62e5e8 1753
582511be 1754 return lp->status_pending_p;
0d62e5e8
DJ
1755}
1756
95954743
PA
1757struct lwp_info *
1758find_lwp_pid (ptid_t ptid)
1759{
d4895ba2
SM
1760 long lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1761 thread_info *thread = find_thread ([lwp] (thread_info *thr_arg)
454296a2 1762 {
da4ae14a 1763 return thr_arg->id.lwp () == lwp;
454296a2 1764 });
d86d4aaf
DE
1765
1766 if (thread == NULL)
1767 return NULL;
1768
9c80ecd6 1769 return get_thread_lwp (thread);
95954743
PA
1770}
1771
fa96cb38 1772/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1773
fa96cb38
PA
1774static int
1775num_lwps (int pid)
1776{
fa96cb38 1777 int count = 0;
0d62e5e8 1778
4d3bb80e
SM
1779 for_each_thread (pid, [&] (thread_info *thread)
1780 {
9c80ecd6 1781 count++;
4d3bb80e 1782 });
3aee8918 1783
fa96cb38
PA
1784 return count;
1785}
d61ddec4 1786
6d4ee8c6
GB
1787/* See nat/linux-nat.h. */
1788
1789struct lwp_info *
1790iterate_over_lwps (ptid_t filter,
d3a70e03 1791 gdb::function_view<iterate_over_lwps_ftype> callback)
6d4ee8c6 1792{
da4ae14a 1793 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
6d1e5673 1794 {
da4ae14a 1795 lwp_info *lwp = get_thread_lwp (thr_arg);
6d1e5673 1796
d3a70e03 1797 return callback (lwp);
6d1e5673 1798 });
6d4ee8c6 1799
9c80ecd6 1800 if (thread == NULL)
6d4ee8c6
GB
1801 return NULL;
1802
9c80ecd6 1803 return get_thread_lwp (thread);
6d4ee8c6
GB
1804}
1805
/* Delete the lwp_info of any thread-group leader that has turned
   zombie while other threads of its group are still alive; such a
   leader can no longer be debugged (see the long comment below for
   why, and for how it is re-added once its exit status is seen).  */

void
linux_process_target::check_zombie_leaders ()
{
  for_each_process ([this] (process_info *proc)
    {
      pid_t leader_pid = pid_of (proc);
      lwp_info *leader_lp = find_lwp_pid (ptid_t (leader_pid));

      threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
			    "num_lwps=%d, zombie=%d",
			    leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
			    linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL && !leader_lp->stopped
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  Note this
	     isn't a watertight check.  If the inferior is
	     multi-threaded and is exiting, it may be we see the
	     leader as zombie before we reap all the non-leader
	     threads.  See comments below.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A zombie leader in a multi-threaded program can mean one
	     of three things:

	     #1 - Only the leader exited, not the whole program, e.g.,
	     with pthread_exit.  Since we can't reap the leader's exit
	     status until all other threads are gone and reaped too,
	     we want to delete the zombie leader right away, as it
	     can't be debugged, we can't read its registers, etc.
	     This is the main reason we check for zombie leaders
	     disappearing.

	     #2 - The whole thread-group/process exited (a group exit,
	     via e.g. exit(3), and there is (or will be shortly) an
	     exit reported for each thread in the process, and then
	     finally an exit for the leader once the non-leaders are
	     reaped.

	     #3 - There are 3 or more threads in the group, and a
	     thread other than the leader exec'd.  See comments on
	     exec events at the top of the file.

	     Ideally we would never delete the leader for case #2.
	     Instead, we want to collect the exit status of each
	     non-leader thread, and then finally collect the exit
	     status of the leader as normal and use its exit code as
	     whole-process exit code.  Unfortunately, there's no
	     race-free way to distinguish cases #1 and #2.  We can't
	     assume the exit events for the non-leaders threads are
	     already pending in the kernel, nor can we assume the
	     non-leader threads are in zombie state already.  Between
	     the leader becoming zombie and the non-leaders exiting
	     and becoming zombie themselves, there's a small time
	     window, so such a check would be racy.  Temporarily
	     pausing all threads and checking to see if all threads
	     exit or not before re-resuming them would work in the
	     case that all threads are running right now, but it
	     wouldn't work if some thread is currently already
	     ptrace-stopped, e.g., due to scheduler-locking.

	     So what we do is we delete the leader anyhow, and then
	     later on when we see its exit status, we re-add it back.
	     We also make sure that we only report a whole-process
	     exit when we see the leader exiting, as opposed to when
	     the last LWP in the LWP list exits, which can be a
	     non-leader if we deleted the leader here.  */
	  threads_debug_printf ("Thread group leader %d zombie "
				"(it exited, or another thread execd), "
				"deleting it.",
				leader_pid);
	  delete_lwp (leader_lp);
	}
    });
}
c3adc08c 1882
a1385b7b
SM
1883/* Callback for `find_thread'. Returns the first LWP that is not
1884 stopped. */
d50171e4 1885
a1385b7b
SM
1886static bool
1887not_stopped_callback (thread_info *thread, ptid_t filter)
fa96cb38 1888{
a1385b7b
SM
1889 if (!thread->id.matches (filter))
1890 return false;
47c0c975 1891
a1385b7b 1892 lwp_info *lwp = get_thread_lwp (thread);
fa96cb38 1893
a1385b7b 1894 return !lwp->stopped;
0d62e5e8 1895}
611cb4a5 1896
863d01bd
PA
1897/* Increment LWP's suspend count. */
1898
1899static void
1900lwp_suspended_inc (struct lwp_info *lwp)
1901{
1902 lwp->suspended++;
1903
c058728c
SM
1904 if (lwp->suspended > 4)
1905 threads_debug_printf
1906 ("LWP %ld has a suspiciously high suspend count, suspended=%d",
1907 lwpid_of (get_lwp_thread (lwp)), lwp->suspended);
863d01bd
PA
1908}
1909
1910/* Decrement LWP's suspend count. */
1911
1912static void
1913lwp_suspended_decr (struct lwp_info *lwp)
1914{
1915 lwp->suspended--;
1916
1917 if (lwp->suspended < 0)
1918 {
1919 struct thread_info *thread = get_lwp_thread (lwp);
1920
f34652de 1921 internal_error ("unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
863d01bd
PA
1922 lwp->suspended);
1923 }
1924}
1925
219f2f23
PA
1926/* This function should only be called if the LWP got a SIGTRAP.
1927
1928 Handle any tracepoint steps or hits. Return true if a tracepoint
1929 event was handled, 0 otherwise. */
1930
1931static int
1932handle_tracepoints (struct lwp_info *lwp)
1933{
1934 struct thread_info *tinfo = get_lwp_thread (lwp);
1935 int tpoint_related_event = 0;
1936
582511be
PA
1937 gdb_assert (lwp->suspended == 0);
1938
7984d532
PA
1939 /* If this tracepoint hit causes a tracing stop, we'll immediately
1940 uninsert tracepoints. To do this, we temporarily pause all
1941 threads, unpatch away, and then unpause threads. We need to make
1942 sure the unpausing doesn't resume LWP too. */
863d01bd 1943 lwp_suspended_inc (lwp);
7984d532 1944
219f2f23
PA
1945 /* And we need to be sure that any all-threads-stopping doesn't try
1946 to move threads out of the jump pads, as it could deadlock the
1947 inferior (LWP could be in the jump pad, maybe even holding the
1948 lock.) */
1949
1950 /* Do any necessary step collect actions. */
1951 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1952
fa593d66
PA
1953 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1954
219f2f23
PA
1955 /* See if we just hit a tracepoint and do its main collect
1956 actions. */
1957 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1958
863d01bd 1959 lwp_suspended_decr (lwp);
7984d532
PA
1960
1961 gdb_assert (lwp->suspended == 0);
229d26fc
SM
1962 gdb_assert (!stabilizing_threads
1963 || (lwp->collecting_fast_tracepoint
1964 != fast_tpoint_collect_result::not_collecting));
7984d532 1965
219f2f23
PA
1966 if (tpoint_related_event)
1967 {
c058728c 1968 threads_debug_printf ("got a tracepoint event");
219f2f23
PA
1969 return 1;
1970 }
1971
1972 return 0;
1973}
1974
13e567af
TBA
1975fast_tpoint_collect_result
1976linux_process_target::linux_fast_tracepoint_collecting
1977 (lwp_info *lwp, fast_tpoint_collect_status *status)
fa593d66
PA
1978{
1979 CORE_ADDR thread_area;
d86d4aaf 1980 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66 1981
fa593d66
PA
1982 /* Get the thread area address. This is used to recognize which
1983 thread is which when tracing with the in-process agent library.
1984 We don't read anything from the address, and treat it as opaque;
1985 it's the address itself that we assume is unique per-thread. */
13e567af 1986 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
229d26fc 1987 return fast_tpoint_collect_result::not_collecting;
fa593d66
PA
1988
1989 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1990}
1991
13e567af
TBA
1992int
1993linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
1994{
1995 return -1;
1996}
1997
d16f3f6c
TBA
/* Check whether LWP stopped while inside a fast tracepoint jump pad,
   and if so, arrange for it to finish moving out of the pad before
   its stop is reported.  WSTAT, if non-NULL, is the wait status that
   stopped LWP.  Returns true when the stop should be deferred (LWP
   still needs to exit the jump pad), false when it can be reported
   as-is.  */

bool
linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
{
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;

      threads_debug_printf
	("Checking whether LWP %ld needs to move out of the jump pad.",
	 lwpid_of (current_thread));

      fast_tpoint_collect_result r
	= linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != fast_tpoint_collect_result::not_collecting)
	    {
	      if (r == fast_tpoint_collect_result::before_insn
		  && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      threads_debug_printf
		("Checking whether LWP %ld needs to move out of the jump pad..."
		 " it does", lwpid_of (current_thread));

	      return true;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint
	    = fast_tpoint_collect_result::not_collecting;

	  if (r != fast_tpoint_collect_result::not_collecting
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread, 1);
	      low_set_pc (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      threads_debug_printf
		("Cancelling fast exit-jump-pad: removing bkpt."
		 "stopping all threads momentarily.");

	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  threads_debug_printf
    ("Checking whether LWP %ld needs to move out of the jump pad... no",
     lwpid_of (current_thread));

  return false;
}
2114
2115/* Enqueue one signal in the "signals to report later when out of the
2116 jump pad" list. */
2117
2118static void
2119enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2120{
d86d4aaf 2121 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66 2122
c058728c
SM
2123 threads_debug_printf ("Deferring signal %d for LWP %ld.",
2124 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2125
2126 if (debug_threads)
2127 {
013e3554 2128 for (const auto &sig : lwp->pending_signals_to_report)
c058728c 2129 threads_debug_printf (" Already queued %d", sig.signal);
fa593d66 2130
c058728c 2131 threads_debug_printf (" (no more currently queued signals)");
fa593d66
PA
2132 }
2133
1a981360
PA
2134 /* Don't enqueue non-RT signals if they are already in the deferred
2135 queue. (SIGSTOP being the easiest signal to see ending up here
2136 twice) */
2137 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2138 {
013e3554 2139 for (const auto &sig : lwp->pending_signals_to_report)
1a981360 2140 {
013e3554 2141 if (sig.signal == WSTOPSIG (*wstat))
1a981360 2142 {
c058728c
SM
2143 threads_debug_printf
2144 ("Not requeuing already queued non-RT signal %d for LWP %ld",
2145 sig.signal, lwpid_of (thread));
1a981360
PA
2146 return;
2147 }
2148 }
2149 }
2150
013e3554 2151 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
8d749320 2152
d86d4aaf 2153 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
013e3554 2154 &lwp->pending_signals_to_report.back ().info);
fa593d66
PA
2155}
2156
2157/* Dequeue one signal from the "signals to report later when out of
2158 the jump pad" list. */
2159
2160static int
2161dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2162{
d86d4aaf
DE
2163 struct thread_info *thread = get_lwp_thread (lwp);
2164
013e3554 2165 if (!lwp->pending_signals_to_report.empty ())
fa593d66 2166 {
013e3554 2167 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
fa593d66 2168
013e3554
TBA
2169 *wstat = W_STOPCODE (p_sig.signal);
2170 if (p_sig.info.si_signo != 0)
d86d4aaf 2171 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
013e3554
TBA
2172 &p_sig.info);
2173
2174 lwp->pending_signals_to_report.pop_front ();
fa593d66 2175
c058728c
SM
2176 threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
2177 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2178
2179 if (debug_threads)
2180 {
013e3554 2181 for (const auto &sig : lwp->pending_signals_to_report)
c058728c 2182 threads_debug_printf (" Still queued %d", sig.signal);
fa593d66 2183
c058728c 2184 threads_debug_printf (" (no more queued signals)");
fa593d66
PA
2185 }
2186
2187 return 1;
2188 }
2189
2190 return 0;
2191}
2192
ac1bbaca
TBA
2193bool
2194linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
582511be 2195{
24583e45
TBA
2196 scoped_restore_current_thread restore_thread;
2197 switch_to_thread (get_lwp_thread (child));
d50171e4 2198
ac1bbaca
TBA
2199 if (low_stopped_by_watchpoint ())
2200 {
2201 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2202 child->stopped_data_address = low_stopped_data_address ();
2203 }
582511be 2204
ac1bbaca
TBA
2205 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2206}
d50171e4 2207
ac1bbaca
TBA
2208bool
2209linux_process_target::low_stopped_by_watchpoint ()
2210{
2211 return false;
2212}
d50171e4 2213
ac1bbaca
TBA
2214CORE_ADDR
2215linux_process_target::low_stopped_data_address ()
2216{
2217 return 0;
c4d9ceb6
YQ
2218}
2219
de0d863e
DB
2220/* Return the ptrace options that we want to try to enable. */
2221
2222static int
2223linux_low_ptrace_options (int attached)
2224{
c12a5089 2225 client_state &cs = get_client_state ();
de0d863e
DB
2226 int options = 0;
2227
2228 if (!attached)
2229 options |= PTRACE_O_EXITKILL;
2230
c12a5089 2231 if (cs.report_fork_events)
de0d863e
DB
2232 options |= PTRACE_O_TRACEFORK;
2233
c12a5089 2234 if (cs.report_vfork_events)
c269dbdb
DB
2235 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2236
c12a5089 2237 if (cs.report_exec_events)
94585166
DB
2238 options |= PTRACE_O_TRACEEXEC;
2239
82075af2
JS
2240 options |= PTRACE_O_TRACESYSGOOD;
2241
de0d863e
DB
2242 return options;
2243}
2244
/* Process one wait status WSTAT reported by waitpid for LWPID:
   attribute it to the right LWP (creating or re-adding one when
   needed), handle exits, extended ptrace events, syscall stops and
   expected/delayed SIGSTOPs.  The event is either consumed here or
   left pending on the LWP for later reporting -- nothing is reported
   to the core directly from this function.  */

void
linux_process_target::filter_event (int lwpid, int wstat)
{
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (ptid_t (lwpid));

  /* Check for events reported by anything not in our LWP list.  */
  if (child == nullptr)
    {
      if (WIFSTOPPED (wstat))
	{
	  if (WSTOPSIG (wstat) == SIGTRAP
	      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
	    {
	      /* A non-leader thread exec'ed after we've seen the
		 leader zombie, and removed it from our lists (in
		 check_zombie_leaders).  The non-leader thread changes
		 its tid to the tgid.  */
	      threads_debug_printf
		("Re-adding thread group leader LWP %d after exec.",
		 lwpid);

	      child = add_lwp (ptid_t (lwpid, lwpid));
	      child->stopped = 1;
	      switch_to_thread (child->thread);
	    }
	  else
	    {
	      /* A process we are controlling has forked and the new
		 child's stop was reported to us by the kernel.  Save
		 its PID and go back to waiting for the fork event to
		 be reported - the stopped process might be returned
		 from waitpid before or after the fork event is.  */
	      threads_debug_printf
		("Saving LWP %d status %s in stopped_pids list",
		 lwpid, status_to_str (wstat).c_str ());
	      add_to_pid_list (&stopped_pids, lwpid, wstat);
	    }
	}
      else
	{
	  /* Don't report an event for the exit of an LWP not in our
	     list, i.e. not part of any inferior we're debugging.
	     This can happen if we detach from a program we originally
	     forked and then it exits.  However, note that we may have
	     earlier deleted a leader of an inferior we're debugging,
	     in check_zombie_leaders.  Re-add it back here if so.  */
	  find_process ([&] (process_info *proc)
	    {
	      if (proc->pid == lwpid)
		{
		  threads_debug_printf
		    ("Re-adding thread group leader LWP %d after exit.",
		     lwpid);

		  child = add_lwp (ptid_t (lwpid, lwpid));
		  return true;
		}
	      return false;
	    });
	}

      if (child == nullptr)
	return;
    }

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      threads_debug_printf ("%d exited", lwpid);

      if (finish_step_over (child))
	{
	  /* Unsuspend all other LWPs, and set them back running again.  */
	  unsuspend_all_lwps (child);
	}

      /* If this is not the leader LWP, then the exit signal was not
	 the end of the debugged application and should be ignored,
	 unless GDB wants to hear about thread exits.  */
      if (report_exit_events_for (thread) || is_leader (thread))
	{
	  /* Since events are serialized to GDB core, and we can't
	     report this one right now.  Leave the status pending for
	     the next time we're able to report it.  */
	  mark_lwp_dead (child, wstat);
	  return;
	}
      else
	{
	  delete_lwp (child);
	  return;
	}
    }

  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return;
	    }
	}
    }

  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Always update syscall_state, even if it will be filtered later.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    {
      child->syscall_state
	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	   ? TARGET_WAITKIND_SYSCALL_RETURN
	   : TARGET_WAITKIND_SYSCALL_ENTRY);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in handle_extended_wait.  */
      child->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Be careful to not overwrite stop_pc until save_stop_reason is
     called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (&child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return;
	}
    }

  if (linux_wstatus_maybe_breakpoint (wstat))
    {
      if (save_stop_reason (child))
	have_stop_pc = 1;
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      threads_debug_printf ("Expected stop.");

      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
				target_pid_to_str (ptid_of (thread)).c_str ());
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
				target_pid_to_str (ptid_of (thread)).c_str ());
	  return;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
				child->stepping ? "step" : "continue",
				target_pid_to_str (ptid_of (thread)).c_str ());

	  resume_one_lwp (child, child->stepping, 0, NULL);
	  return;
	}
    }

  child->status_pending_p = 1;
  child->status_pending = wstat;
  return;
}
2463
b31cdfa6
TBA
2464bool
2465linux_process_target::maybe_hw_step (thread_info *thread)
f79b145d 2466{
b31cdfa6
TBA
2467 if (supports_hardware_single_step ())
2468 return true;
f79b145d
YQ
2469 else
2470 {
3b9a79ef 2471 /* GDBserver must insert single-step breakpoint for software
f79b145d 2472 single step. */
3b9a79ef 2473 gdb_assert (has_single_step_breakpoints (thread));
b31cdfa6 2474 return false;
f79b145d
YQ
2475 }
2476}
2477
df95181f
TBA
2478void
2479linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
20ba1ce6 2480{
20ba1ce6
PA
2481 struct lwp_info *lp = get_thread_lwp (thread);
2482
2483 if (lp->stopped
863d01bd 2484 && !lp->suspended
20ba1ce6 2485 && !lp->status_pending_p
183be222 2486 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
20ba1ce6 2487 {
8901d193
YQ
2488 int step = 0;
2489
2490 if (thread->last_resume_kind == resume_step)
b6d8d612
KB
2491 {
2492 if (supports_software_single_step ())
2493 install_software_single_step_breakpoints (lp);
2494
2495 step = maybe_hw_step (thread);
2496 }
20ba1ce6 2497
c058728c
SM
2498 threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
2499 target_pid_to_str (ptid_of (thread)).c_str (),
2500 paddress (lp->stop_pc), step);
20ba1ce6 2501
df95181f 2502 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
20ba1ce6
PA
2503 }
2504}
2505
d16f3f6c
TBA
/* Wait for an event from any LWP matching WAIT_PTID, and select for
   reporting one whose id matches FILTER_PTID.  Stores the raw wait
   status in *WSTATP.  OPTIONS are waitpid-style flags (WNOHANG is
   honored).  Returns the LWP id of the thread with the event (also
   made the current thread), 0 if WNOHANG was given and no event was
   found, or -1 if there are no resumed LWPs left to wait for.  */

int
linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
					       ptid_t filter_ptid,
					       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
    {
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  threads_debug_printf ("Got a pending child %ld", lwpid_of (event_thread));
	}
    }
  else if (filter_ptid != null_ptid)
    {
      requested_child = find_lwp_pid (filter_ptid);
      gdb_assert (requested_child != nullptr);

      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && (requested_child->collecting_fast_tracepoint
	      != fast_tpoint_collect_result::not_collecting))
	{
	  /* The requested child is mid fast-tracepoint collect;
	     defer its pending signal and let it finish the collect
	     first.  */
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error ("requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      threads_debug_printf ("Got an event from pending child %ld (%04x)",
			    lwpid_of (event_thread),
			    event_child->status_pending);

      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      switch_to_thread (event_thread);
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
			    ret, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  threads_debug_printf ("waitpid %ld received %s",
				(long) ret, status_to_str (*wstatp).c_str ());

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_thread ([this] (thread_info *thread)
	  {
	    resume_stopped_resumed_lwps (thread);
	  });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      auto not_stopped = [&] (thread_info *thread)
	{
	  return not_stopped_callback (thread, wait_ptid);
	};

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if (find_thread (not_stopped) == NULL)
	{
	  threads_debug_printf ("exit (no unwaited-for LWP)");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  threads_debug_printf ("WNOHANG set, no event found");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      threads_debug_printf ("sigsuspend'ing");

      sigsuspend (&prev_mask);
      gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);

  switch_to_thread (event_thread);

  return lwpid_of (event_thread);
}
2703
d16f3f6c
TBA
2704int
2705linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
fa96cb38 2706{
d16f3f6c 2707 return wait_for_event_filtered (ptid, ptid, wstatp, options);
611cb4a5
DJ
2708}
2709
6bf5e0ba
PA
2710/* Select one LWP out of those that have events pending. */
2711
2712static void
2713select_event_lwp (struct lwp_info **orig_lp)
2714{
582511be
PA
2715 struct thread_info *event_thread = NULL;
2716
2717 /* In all-stop, give preference to the LWP that is being
2718 single-stepped. There will be at most one, and it's the LWP that
2719 the core is most interested in. If we didn't do this, then we'd
2720 have to handle pending step SIGTRAPs somehow in case the core
2721 later continues the previously-stepped thread, otherwise we'd
2722 report the pending SIGTRAP, and the core, not having stepped the
2723 thread, wouldn't understand what the trap was for, and therefore
2724 would report it to the user as a random signal. */
2725 if (!non_stop)
6bf5e0ba 2726 {
39a64da5
SM
2727 event_thread = find_thread ([] (thread_info *thread)
2728 {
2729 lwp_info *lp = get_thread_lwp (thread);
2730
183be222 2731 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
39a64da5
SM
2732 && thread->last_resume_kind == resume_step
2733 && lp->status_pending_p);
2734 });
2735
582511be 2736 if (event_thread != NULL)
c058728c
SM
2737 threads_debug_printf
2738 ("Select single-step %s",
2739 target_pid_to_str (ptid_of (event_thread)).c_str ());
6bf5e0ba 2740 }
582511be 2741 if (event_thread == NULL)
6bf5e0ba
PA
2742 {
2743 /* No single-stepping LWP. Select one at random, out of those
dda83cd7 2744 which have had events. */
6bf5e0ba 2745
b0319eaa 2746 event_thread = find_thread_in_random ([&] (thread_info *thread)
39a64da5
SM
2747 {
2748 lwp_info *lp = get_thread_lwp (thread);
2749
b0319eaa 2750 /* Only resumed LWPs that have an event pending. */
183be222 2751 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
b0319eaa 2752 && lp->status_pending_p);
39a64da5 2753 });
6bf5e0ba
PA
2754 }
2755
d86d4aaf 2756 if (event_thread != NULL)
6bf5e0ba 2757 {
d86d4aaf
DE
2758 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2759
6bf5e0ba
PA
2760 /* Switch the event LWP. */
2761 *orig_lp = event_lp;
2762 }
2763}
2764
7984d532
PA
2765/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2766 NULL. */
2767
2768static void
2769unsuspend_all_lwps (struct lwp_info *except)
2770{
139720c5
SM
2771 for_each_thread ([&] (thread_info *thread)
2772 {
2773 lwp_info *lwp = get_thread_lwp (thread);
2774
2775 if (lwp != except)
2776 lwp_suspended_decr (lwp);
2777 });
7984d532
PA
2778}
2779
5a6b0a41 2780static bool lwp_running (thread_info *thread);
fa593d66
PA
2781
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

   - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

   - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

void
linux_process_target::stabilize_threads ()
{
  /* If any thread is already wedged in a jump pad, we can't make
     progress; bail out early without touching anything.  */
  thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
			      {
				return stuck_in_jump_pad (thread);
			      });

  if (thread_stuck != NULL)
    {
      threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
			    lwpid_of (thread_stuck));

      return;
    }

  /* wait_1 below switches current_thread; restore it on exit.  */
  scoped_restore_current_thread restore_thread;

  /* Flag consulted by wait_1 so it knows a stabilization pass is in
     progress (see the "stopped while stabilizing threads" path
     there).  */
  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread ([this] (thread_info *thread)
    {
      move_out_of_jump_pad (thread);
    });

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait even loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  (Balanced by the unsuspend_all_lwps call
	     below once everything has settled.)  */
	  lwp_suspended_inc (lwp);

	  /* Re-queue any real signal so it isn't lost; it will be
	     reported once the thread is out of the jump pad.  */
	  if (ourstatus.sig () != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  /* Sanity check (debug builds of the protocol only): nobody should
     be stuck in a jump pad at this point.  */
  if (debug_threads)
    {
      thread_stuck = find_thread ([this] (thread_info *thread)
		     {
		       return stuck_in_jump_pad (thread);
		     });

      if (thread_stuck != NULL)
	threads_debug_printf
	  ("couldn't stabilize, LWP %ld got stuck in jump pad",
	   lwpid_of (thread_stuck));
    }
}
2882
582511be
PA
2883/* Convenience function that is called when the kernel reports an
2884 event that is not passed out to GDB. */
2885
2886static ptid_t
2887ignore_event (struct target_waitstatus *ourstatus)
2888{
2889 /* If we got an event, there may still be others, as a single
2890 SIGCHLD can indicate more than one child stopped. This forces
2891 another target_wait call. */
2892 async_file_mark ();
2893
183be222 2894 ourstatus->set_ignore ();
582511be
PA
2895 return null_ptid;
2896}
2897
fd000fb3
TBA
2898ptid_t
2899linux_process_target::filter_exit_event (lwp_info *event_child,
2900 target_waitstatus *ourstatus)
65706a29
PA
2901{
2902 struct thread_info *thread = get_lwp_thread (event_child);
2903 ptid_t ptid = ptid_of (thread);
2904
48989498
PA
2905 /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
2906 if a non-leader thread exits with a signal, we'd report it to the
2907 core which would interpret it as the whole-process exiting.
2908 There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind. */
2909 if (ourstatus->kind () != TARGET_WAITKIND_EXITED
2910 && ourstatus->kind () != TARGET_WAITKIND_SIGNALLED)
2911 return ptid;
2912
8a841a35 2913 if (!is_leader (thread))
65706a29 2914 {
48989498 2915 if (report_exit_events_for (thread))
183be222 2916 ourstatus->set_thread_exited (0);
65706a29 2917 else
183be222 2918 ourstatus->set_ignore ();
65706a29
PA
2919
2920 delete_lwp (event_child);
2921 }
2922 return ptid;
2923}
2924
82075af2
JS
2925/* Returns 1 if GDB is interested in any event_child syscalls. */
2926
2927static int
2928gdb_catching_syscalls_p (struct lwp_info *event_child)
2929{
2930 struct thread_info *thread = get_lwp_thread (event_child);
2931 struct process_info *proc = get_thread_process (thread);
2932
f27866ba 2933 return !proc->syscalls_to_catch.empty ();
82075af2
JS
2934}
2935
9eedd27d
TBA
2936bool
2937linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
82075af2 2938{
4cc32bec 2939 int sysno;
82075af2
JS
2940 struct thread_info *thread = get_lwp_thread (event_child);
2941 struct process_info *proc = get_thread_process (thread);
2942
f27866ba 2943 if (proc->syscalls_to_catch.empty ())
9eedd27d 2944 return false;
82075af2 2945
f27866ba 2946 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
9eedd27d 2947 return true;
82075af2 2948
4cc32bec 2949 get_syscall_trapinfo (event_child, &sysno);
f27866ba
SM
2950
2951 for (int iter : proc->syscalls_to_catch)
82075af2 2952 if (iter == sysno)
9eedd27d 2953 return true;
82075af2 2954
9eedd27d 2955 return false;
82075af2
JS
2956}
2957
d16f3f6c
TBA
/* Worker for linux_process_target::wait.  Waits for (or polls, with
   TARGET_WNOHANG) one event from the inferior, decides whether it is
   of interest to GDB, and either fills OURSTATUS with a reportable
   event and returns the event thread's ptid, or quietly resumes the
   inferior and returns null_ptid with TARGET_WAITKIND_IGNORE.  */

ptid_t
linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
			      target_wait_flags target_options)
{
  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  client_state &cs = get_client_state ();
  int w;				/* Raw waitpid status.  */
  struct lwp_info *event_child;		/* LWP that reported the event.  */
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;
  int in_step_range;

  threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

  bp_explains_trap = 0;
  trace_event = 0;
  in_step_range = 0;
  ourstatus->set_ignore ();

  bool was_any_resumed = any_resumed ();

  /* While a step-over is in progress, only wait on the thread doing
     the step-over, and do so blockingly, so nothing else moves.  */
  if (step_over_bkpt == null_ptid)
    pid = wait_for_event (ptid, &w, options);
  else
    {
      threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
			    target_pid_to_str (step_over_bkpt).c_str ());
      pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0 || (pid == -1 && !was_any_resumed))
    {
      gdb_assert (target_options & TARGET_WNOHANG);

      threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");

      ourstatus->set_ignore ();
      return null_ptid;
    }
  else if (pid == -1)
    {
      threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");

      ourstatus->set_no_resumed ();
      return null_ptid;
    }

  event_child = get_thread_lwp (current_thread);

  /* wait_for_event only returns an exit status for the last
     child of a process.  Report it.  */
  if (WIFEXITED (w) || WIFSIGNALED (w))
    {
      if (WIFEXITED (w))
	{
	  ourstatus->set_exited (WEXITSTATUS (w));

	  threads_debug_printf
	    ("ret = %s, exited with retcode %d",
	     target_pid_to_str (ptid_of (current_thread)).c_str (),
	     WEXITSTATUS (w));
	}
      else
	{
	  ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));

	  threads_debug_printf
	    ("ret = %s, terminated with signal %d",
	     target_pid_to_str (ptid_of (current_thread)).c_str (),
	     WTERMSIG (w));
	}

      return filter_exit_event (event_child, ourstatus);
    }

  /* If step-over executes a breakpoint instruction, in the case of a
     hardware single step it means a gdb/gdbserver breakpoint had been
     planted on top of a permanent breakpoint, in the case of a software
     single step it may just mean that gdbserver hit the reinsert breakpoint.
     The PC has been adjusted by save_stop_reason to point at
     the breakpoint address.
     So in the case of the hardware single step advance the PC manually
     past the breakpoint and in the case of software single step advance only
     if it's not the single_step_breakpoint we are hitting.
     This avoids that a program would keep trapping a permanent breakpoint
     forever.  */
  if (step_over_bkpt != null_ptid
      && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && (event_child->stepping
	  || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
    {
      int increment_pc = 0;
      int breakpoint_kind = 0;
      CORE_ADDR stop_pc = event_child->stop_pc;

      breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
      sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);

      threads_debug_printf
	("step-over for %s executed software breakpoint",
	 target_pid_to_str (ptid_of (current_thread)).c_str ());

      if (increment_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);

	  event_child->stop_pc += increment_pc;
	  low_set_pc (regcache, event_child->stop_pc);

	  if (!low_breakpoint_at (event_child->stop_pc))
	    event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (low_supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && low_breakpoint_at (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any single-step
	 breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	threads_debug_printf ("Hit a gdbserver breakpoint.");
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* If we're collecting a fast tracepoint, finish the collection and
     move out of the jump pad before delivering a signal.  See
     linux_stabilize_threads.  */

  if (WIFSTOPPED (w)
      && WSTOPSIG (w) != SIGTRAP
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      threads_debug_printf ("Got signal %d for LWP %ld.  Check if we need "
			    "to defer or adjust it.",
			    WSTOPSIG (w), lwpid_of (current_thread));

      /* Allow debugging the jump pad itself.  */
      if (current_thread->last_resume_kind != resume_step
	  && maybe_move_out_of_jump_pad (event_child, &w))
	{
	  enqueue_one_deferred_signal (event_child, &w);

	  threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
				WSTOPSIG (w), lwpid_of (current_thread));

	  resume_one_lwp (event_child, 0, 0, NULL);

	  return ignore_event (ourstatus);
	}
    }

  if (event_child->collecting_fast_tracepoint
      != fast_tpoint_collect_result::not_collecting)
    {
      threads_debug_printf
	("LWP %ld was trying to move out of the jump pad (%d).  "
	 "Check if we're already there.",
	 lwpid_of (current_thread),
	 (int) event_child->collecting_fast_tracepoint);

      trace_event = 1;

      event_child->collecting_fast_tracepoint
	= linux_fast_tracepoint_collecting (event_child, NULL);

      if (event_child->collecting_fast_tracepoint
	  != fast_tpoint_collect_result::before_insn)
	{
	  /* No longer need this breakpoint.  */
	  if (event_child->exit_jump_pad_bkpt != NULL)
	    {
	      threads_debug_printf
		("No longer need exit-jump-pad bkpt; removing it."
		 "stopping all threads momentarily.");

	      /* Other running threads could hit this breakpoint.
		 We don't handle moribund locations like GDB does,
		 instead we always pause all threads when removing
		 breakpoints, so that any step-over or
		 decr_pc_after_break adjustment is always taken
		 care of while the breakpoint is still
		 inserted.  */
	      stop_all_lwps (1, event_child);

	      delete_breakpoint (event_child->exit_jump_pad_bkpt);
	      event_child->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, event_child);

	      gdb_assert (event_child->suspended >= 0);
	    }
	}

      if (event_child->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting)
	{
	  threads_debug_printf
	    ("fast tracepoint finished collecting successfully.");

	  /* We may have a deferred signal to report.  */
	  if (dequeue_one_deferred_signal (event_child, &w))
	    threads_debug_printf ("dequeued one signal.");
	  else
	    {
	      threads_debug_printf ("no deferred signals.");

	      if (stabilizing_threads)
		{
		  ourstatus->set_stopped (GDB_SIGNAL_0);

		  threads_debug_printf
		    ("ret = %s, stopped while stabilizing threads",
		     target_pid_to_str (ptid_of (current_thread)).c_str ());

		  return ptid_of (current_thread);
		}
	    }
	}
    }

  /* Check whether GDB would be interested in this event.  */

  /* Check if GDB is interested in this syscall.  */
  if (WIFSTOPPED (w)
      && WSTOPSIG (w) == SYSCALL_SIGTRAP
      && !gdb_catch_this_syscall (event_child))
    {
      threads_debug_printf ("Ignored syscall for LWP %ld.",
			    lwpid_of (current_thread));

      resume_one_lwp (event_child, event_child->stepping, 0, NULL);

      return ignore_event (ourstatus);
    }

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler.  Also never ignore signals that could be caused by a
     breakpoint.  */
  if (WIFSTOPPED (w)
      && current_thread->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
	  (current_process ()->priv->thread_db != NULL
	   && (WSTOPSIG (w) == __SIGRTMIN
	       || WSTOPSIG (w) == __SIGRTMIN + 1))
	  ||
#endif
	  (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
	   && !(WSTOPSIG (w) == SIGSTOP
		&& current_thread->last_resume_kind == resume_stop)
	   && !linux_wstatus_maybe_breakpoint (w))))
    {
      siginfo_t info, *info_p;

      threads_debug_printf ("Ignored signal %d for LWP %ld.",
			    WSTOPSIG (w), lwpid_of (current_thread));

      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
		  (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      if (step_over_finished)
	{
	  /* We cancelled this thread's step-over above.  We still
	     need to unsuspend all other LWPs, and set them back
	     running again while the signal handler runs.  */
	  unsuspend_all_lwps (event_child);

	  /* Enqueue the pending signal info so that proceed_all_lwps
	     doesn't lose it.  */
	  enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);

	  proceed_all_lwps ();
	}
      else
	{
	  resume_one_lwp (event_child, event_child->stepping,
			  WSTOPSIG (w), info_p);
	}

      return ignore_event (ourstatus);
    }

  /* Note that all addresses are always "out of the step range" when
     there's no range to begin with.  */
  in_step_range = lwp_in_step_range (event_child);

  /* If GDB wanted this thread to single step, and the thread is out
     of the step range, we always want to report the SIGTRAP, and let
     GDB handle it.  Watchpoints should always be reported.  So should
     signals we can't explain.  A SIGTRAP we can't explain could be a
     GDB breakpoint --- we may or not support Z0 breakpoints.  If we
     do, we're be able to handle GDB breakpoints on top of internal
     breakpoints, by handling the internal breakpoint and still
     reporting the event to GDB.  If we don't, we're out of luck, GDB
     won't see the breakpoint hit.  If we see a single-step event but
     the thread should be continuing, don't pass the trap to gdb.
     That indicates that we had previously finished a single-step but
     left the single-step pending -- see
     complete_ongoing_step_over.  */
  report_to_gdb = (!maybe_internal_trap
		   || (current_thread->last_resume_kind == resume_step
		       && !in_step_range)
		   || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
		   || (!in_step_range
		       && !bp_explains_trap
		       && !trace_event
		       && !step_over_finished
		       && !(current_thread->last_resume_kind == resume_continue
			    && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
		   || (gdb_breakpoint_here (event_child->stop_pc)
		       && gdb_condition_true_at_breakpoint (event_child->stop_pc)
		       && gdb_no_commands_at_breakpoint (event_child->stop_pc))
		   || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);

  run_breakpoint_commands (event_child->stop_pc);

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (bp_explains_trap)
	threads_debug_printf ("Hit a gdbserver breakpoint.");

      if (step_over_finished)
	threads_debug_printf ("Step-over finished.");

      if (trace_event)
	threads_debug_printf ("Tracepoint event.");

      if (lwp_in_step_range (event_child))
	threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
			      paddress (event_child->stop_pc),
			      paddress (event_child->step_range_start),
			      paddress (event_child->step_range_end));

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (low_supports_breakpoints ())
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc);
	}

      if (step_over_finished)
	{
	  /* If we have finished stepping over a breakpoint, we've
	     stopped and suspended all LWPs momentarily except the
	     stepping one.  This is where we resume them all again.
	     We're going to keep waiting, so use proceed, which
	     handles stepping over the next breakpoint.  */
	  unsuspend_all_lwps (event_child);
	}
      else
	{
	  /* Remove the single-step breakpoints if any.  Note that
	     there isn't single-step breakpoint if we finished stepping
	     over.  */
	  if (supports_software_single_step ()
	      && has_single_step_breakpoints (current_thread))
	    {
	      stop_all_lwps (0, event_child);
	      delete_single_step_breakpoints (current_thread);
	      unstop_all_lwps (0, event_child);
	    }
	}

      threads_debug_printf ("proceeding all threads.");

      proceed_all_lwps ();

      return ignore_event (ourstatus);
    }

  if (debug_threads)
    {
      if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
	threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
			      lwpid_of (get_lwp_thread (event_child)),
			      event_child->waitstatus.to_string ().c_str ());

      if (current_thread->last_resume_kind == resume_step)
	{
	  if (event_child->step_range_start == event_child->step_range_end)
	    threads_debug_printf
	      ("GDB wanted to single-step, reporting event.");
	  else if (!lwp_in_step_range (event_child))
	    threads_debug_printf ("Out of step range, reporting event.");
	}

      if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
	threads_debug_printf ("Stopped by watchpoint.");
      else if (gdb_breakpoint_here (event_child->stop_pc))
	threads_debug_printf ("Stopped by GDB breakpoint.");
    }

  threads_debug_printf ("Hit a non-gdbserver trap event.");

  /* Alright, we're going to report a stop.  */

  /* Remove single-step breakpoints.  */
  if (supports_software_single_step ())
    {
      /* Remove single-step breakpoints or not.  It it is true, stop all
	 lwps, so that other threads won't hit the breakpoint in the
	 staled memory.  */
      int remove_single_step_breakpoints_p = 0;

      if (non_stop)
	{
	  remove_single_step_breakpoints_p
	    = has_single_step_breakpoints (current_thread);
	}
      else
	{
	  /* In all-stop, a stop reply cancels all previous resume
	     requests.  Delete all single-step breakpoints.  */

	  find_thread ([&] (thread_info *thread) {
	    if (has_single_step_breakpoints (thread))
	      {
		remove_single_step_breakpoints_p = 1;
		return true;
	      }

	    return false;
	  });
	}

      if (remove_single_step_breakpoints_p)
	{
	  /* If we remove single-step breakpoints from memory, stop all lwps,
	     so that other threads won't hit the breakpoint in the staled
	     memory.  */
	  stop_all_lwps (0, event_child);

	  if (non_stop)
	    {
	      gdb_assert (has_single_step_breakpoints (current_thread));
	      delete_single_step_breakpoints (current_thread);
	    }
	  else
	    {
	      for_each_thread ([] (thread_info *thread){
		if (has_single_step_breakpoints (thread))
		  delete_single_step_breakpoints (thread);
	      });
	    }

	  unstop_all_lwps (0, event_child);
	}
    }

  if (!stabilizing_threads)
    {
      /* In all-stop, stop all threads.  */
      if (!non_stop)
	stop_all_lwps (0, NULL);

      if (step_over_finished)
	{
	  if (!non_stop)
	    {
	      /* If we were doing a step-over, all other threads but
		 the stepping one had been paused in start_step_over,
		 with their suspend counts incremented.  We don't want
		 to do a full unstop/unpause, because we're in
		 all-stop mode (so we want threads stopped), but we
		 still need to unsuspend the other threads, to
		 decrement their `suspended' count back.  */
	      unsuspend_all_lwps (event_child);
	    }
	  else
	    {
	      /* If we just finished a step-over, then all threads had
		 been momentarily paused.  In all-stop, that's fine,
		 we want threads stopped by now anyway.  In non-stop,
		 we need to re-resume threads that GDB wanted to be
		 running.  */
	      unstop_all_lwps (1, event_child);
	    }
	}

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid == minus_one_ptid)
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  /* current_thread and event_child must stay in sync.  */
	  switch_to_thread (get_lwp_thread (event_child));

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}


      /* Stabilize threads (move out of jump pads).  */
      if (!non_stop)
	target_stabilize_threads ();
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (1, event_child);
    }

  /* At this point, we haven't set OURSTATUS.  This is where we do it.  */
  gdb_assert (ourstatus->kind () == TARGET_WAITKIND_IGNORE);

  if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    {
      /* If the reported event is an exit, fork, vfork, clone or exec,
	 let GDB know.  */

      /* Break the unreported fork/vfork/clone relationship chain.  */
      if (is_new_child_status (event_child->waitstatus.kind ()))
	{
	  event_child->relative->relative = NULL;
	  event_child->relative = NULL;
	}

      *ourstatus = event_child->waitstatus;
      /* Clear the event lwp's waitstatus since we handled it already.  */
      event_child->waitstatus.set_ignore ();
    }
  else
    {
      /* The LWP stopped due to a plain signal or a syscall signal.  Either way,
	 event_child->waitstatus wasn't filled in with the details, so look at
	 the wait status W.  */
      if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
	{
	  int syscall_number;

	  get_syscall_trapinfo (event_child, &syscall_number);
	  if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
	    ourstatus->set_syscall_entry (syscall_number);
	  else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
	    ourstatus->set_syscall_return (syscall_number);
	  else
	    gdb_assert_not_reached ("unexpected syscall state");
	}
      else if (current_thread->last_resume_kind == resume_stop
	       && WSTOPSIG (w) == SIGSTOP)
	{
	  /* A thread that has been requested to stop by GDB with vCont;t,
	     and it stopped cleanly, so report as SIG0.  The use of
	     SIGSTOP is an implementation detail.  */
	  ourstatus->set_stopped (GDB_SIGNAL_0);
	}
      else
	ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
    }

  /* Now that we've selected our final event LWP, un-adjust its PC if
     it was a software breakpoint, and the client doesn't know we can
     adjust the breakpoint ourselves.  */
  if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && !cs.swbreak_feature)
    {
      int decr_pc = low_decr_pc_after_break ();

      if (decr_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc + decr_pc);
	}
    }

  gdb_assert (step_over_bkpt == null_ptid);

  threads_debug_printf ("ret = %s, %s",
			target_pid_to_str (ptid_of (current_thread)).c_str (),
			ourstatus->to_string ().c_str ());

  return filter_exit_event (event_child, ourstatus);
}
3610
/* Get rid of any pending event in the pipe.  Called so a stale
   wake-up token doesn't make the event loop spin after the event it
   signalled has already been consumed.  */
static void
async_file_flush (void)
{
  linux_event_pipe.flush ();
}
3617
/* Put something in the pipe, so the event loop wakes up.  Used to
   force another target_wait poll when more events may be pending.  */
static void
async_file_mark (void)
{
  linux_event_pipe.mark ();
}
3624
6532e7e3
TBA
3625ptid_t
3626linux_process_target::wait (ptid_t ptid,
3627 target_waitstatus *ourstatus,
b60cea74 3628 target_wait_flags target_options)
bd99dc85 3629{
95954743 3630 ptid_t event_ptid;
bd99dc85 3631
bd99dc85
PA
3632 /* Flush the async file first. */
3633 if (target_is_async_p ())
3634 async_file_flush ();
3635
582511be
PA
3636 do
3637 {
d16f3f6c 3638 event_ptid = wait_1 (ptid, ourstatus, target_options);
582511be
PA
3639 }
3640 while ((target_options & TARGET_WNOHANG) == 0
183be222 3641 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3642
3643 /* If at least one stop was reported, there may be more. A single
3644 SIGCHLD can signal more than one child stop. */
3645 if (target_is_async_p ()
3646 && (target_options & TARGET_WNOHANG) != 0
d7e15655 3647 && event_ptid != null_ptid)
bd99dc85
PA
3648 async_file_mark ();
3649
3650 return event_ptid;
da6d8c04
DJ
3651}
3652
c5f62d5f 3653/* Send a signal to an LWP. */
fd500816
DJ
3654
3655static int
a1928bad 3656kill_lwp (unsigned long lwpid, int signo)
fd500816 3657{
4a6ed09b 3658 int ret;
fd500816 3659
4a6ed09b
PA
3660 errno = 0;
3661 ret = syscall (__NR_tkill, lwpid, signo);
3662 if (errno == ENOSYS)
3663 {
3664 /* If tkill fails, then we are not using nptl threads, a
3665 configuration we no longer support. */
3666 perror_with_name (("tkill"));
3667 }
3668 return ret;
fd500816
DJ
3669}
3670
964e4306
PA
3671void
3672linux_stop_lwp (struct lwp_info *lwp)
3673{
3674 send_sigstop (lwp);
3675}
3676
0d62e5e8 3677static void
02fc4de7 3678send_sigstop (struct lwp_info *lwp)
0d62e5e8 3679{
bd99dc85 3680 int pid;
0d62e5e8 3681
d86d4aaf 3682 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3683
0d62e5e8
DJ
3684 /* If we already have a pending stop signal for this process, don't
3685 send another. */
54a0b537 3686 if (lwp->stop_expected)
0d62e5e8 3687 {
c058728c 3688 threads_debug_printf ("Have pending sigstop for lwp %d", pid);
ae13219e 3689
0d62e5e8
DJ
3690 return;
3691 }
3692
c058728c 3693 threads_debug_printf ("Sending sigstop to lwp %d", pid);
0d62e5e8 3694
d50171e4 3695 lwp->stop_expected = 1;
bd99dc85 3696 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3697}
3698
df3e4dbe
SM
3699static void
3700send_sigstop (thread_info *thread, lwp_info *except)
02fc4de7 3701{
d86d4aaf 3702 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3703
7984d532
PA
3704 /* Ignore EXCEPT. */
3705 if (lwp == except)
df3e4dbe 3706 return;
7984d532 3707
02fc4de7 3708 if (lwp->stopped)
df3e4dbe 3709 return;
02fc4de7
PA
3710
3711 send_sigstop (lwp);
7984d532
PA
3712}
3713
3714/* Increment the suspend count of an LWP, and stop it, if not stopped
3715 yet. */
df3e4dbe
SM
3716static void
3717suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
7984d532 3718{
d86d4aaf 3719 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3720
3721 /* Ignore EXCEPT. */
3722 if (lwp == except)
df3e4dbe 3723 return;
7984d532 3724
863d01bd 3725 lwp_suspended_inc (lwp);
7984d532 3726
df3e4dbe 3727 send_sigstop (thread, except);
02fc4de7
PA
3728}
3729
95954743
PA
3730static void
3731mark_lwp_dead (struct lwp_info *lwp, int wstat)
3732{
95954743
PA
3733 /* Store the exit status for later. */
3734 lwp->status_pending_p = 1;
3735 lwp->status_pending = wstat;
3736
00db26fa
PA
3737 /* Store in waitstatus as well, as there's nothing else to process
3738 for this event. */
3739 if (WIFEXITED (wstat))
183be222 3740 lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
00db26fa 3741 else if (WIFSIGNALED (wstat))
183be222 3742 lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
00db26fa 3743
95954743
PA
3744 /* Prevent trying to stop it. */
3745 lwp->stopped = 1;
3746
3747 /* No further stops are expected from a dead lwp. */
3748 lwp->stop_expected = 0;
3749}
3750
00db26fa
PA
3751/* Return true if LWP has exited already, and has a pending exit event
3752 to report to GDB. */
3753
3754static int
3755lwp_is_marked_dead (struct lwp_info *lwp)
3756{
3757 return (lwp->status_pending_p
3758 && (WIFEXITED (lwp->status_pending)
3759 || WIFSIGNALED (lwp->status_pending)));
3760}
3761
/* Wait until every LWP we sent a SIGSTOP to has actually stopped,
   leaving all events pending.  If the previously current thread died
   in the process, deliberately leave no thread selected so a stale
   selection can't be acted on.  */

void
linux_process_target::wait_for_sigstop ()
{
  struct thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  scoped_restore_current_thread restore_thread;

  threads_debug_printf ("pulling events");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
  gdb_assert (ret == -1);

  if (saved_thread == NULL || mythread_alive (saved_tid))
    return;
  else
    {
      threads_debug_printf ("Previously current thread died.");

      /* We can't change the current inferior behind GDB's back,
	 otherwise, a subsequent command may apply to the wrong
	 process.  */
      restore_thread.dont_restore ();
      switch_to_thread (nullptr);
    }
}
3799
/* Return true if THREAD is stopped somewhere inside the fast
   tracepoint jump pad and therefore must be moved out before threads
   can be reported stopped to GDB.  THREAD must already be stopped and
   not suspended.  */

bool
linux_process_target::stuck_in_jump_pad (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  if (lwp->suspended != 0)
    {
      internal_error ("LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* Allow debugging the jump pad, gdb_collect, etc..  */
  return (supports_fast_tracepoints ()
	  && agent_loaded_p ()
	  && (gdb_breakpoint_here (lwp->stop_pc)
	      || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
	      || thread->last_resume_kind == resume_step)
	  && (linux_fast_tracepoint_collecting (lwp, NULL)
	      != fast_tpoint_collect_result::not_collecting));
}
3821
/* If THREAD is stopped inside the fast tracepoint jump pad, resume it
   so it can finish the collect and reach a stable point; any signal
   it was stopped with is deferred for later delivery.  Otherwise,
   suspend it where it is.  */

void
linux_process_target::move_out_of_jump_pad (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int *wstat;

  if (lwp->suspended != 0)
    {
      internal_error ("LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* For gdb_breakpoint_here.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
			    lwpid_of (thread));

      if (wstat)
	{
	  /* Defer the signal that stopped it; it will be re-delivered
	     once the LWP is out of the jump pad.  */
	  lwp->status_pending_p = 0;
	  enqueue_one_deferred_signal (lwp, wstat);

	  threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad",
				WSTOPSIG (*wstat), lwpid_of (thread));
	}

      resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    lwp_suspended_inc (lwp);
}
3864
5a6b0a41
SM
3865static bool
3866lwp_running (thread_info *thread)
fa593d66 3867{
d86d4aaf 3868 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3869
00db26fa 3870 if (lwp_is_marked_dead (lwp))
5a6b0a41
SM
3871 return false;
3872
3873 return !lwp->stopped;
fa593d66
PA
3874}
3875
/* Stop all LWPs except EXCEPT, waiting until they have all reported
   their SIGSTOP.  If SUSPEND is nonzero, also bump each LWP's suspend
   count.  Sets the global STOPPING_THREADS mode for the duration so
   event handling knows a mass stop is in progress.  Must not be
   called recursively.  */

void
linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
{
  /* Should not be called recursively.  */
  gdb_assert (stopping_threads == NOT_STOPPING_THREADS);

  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  threads_debug_printf
    ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
     (except != NULL
      ? target_pid_to_str (ptid_of (get_lwp_thread (except))).c_str ()
      : "none"));

  stopping_threads = (suspend
		      ? STOPPING_AND_SUSPENDING_THREADS
		      : STOPPING_THREADS);

  if (suspend)
    for_each_thread ([&] (thread_info *thread)
      {
	suspend_and_send_sigstop (thread, except);
      });
  else
    for_each_thread ([&] (thread_info *thread)
      {
	send_sigstop (thread, except);
      });

  /* Block until every signalled LWP has actually stopped.  */
  wait_for_sigstop ();
  stopping_threads = NOT_STOPPING_THREADS;

  threads_debug_printf ("setting stopping_threads back to !stopping");
}
3910
863d01bd
PA
3911/* Enqueue one signal in the chain of signals which need to be
3912 delivered to this process on next resume. */
3913
3914static void
3915enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3916{
013e3554
TBA
3917 lwp->pending_signals.emplace_back (signal);
3918 if (info == nullptr)
3919 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
863d01bd 3920 else
013e3554 3921 lwp->pending_signals.back ().info = *info;
863d01bd
PA
3922}
3923
df95181f
TBA
3924void
3925linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
fa5308bd 3926{
984a2c04
YQ
3927 struct thread_info *thread = get_lwp_thread (lwp);
3928 struct regcache *regcache = get_thread_regcache (thread, 1);
8ce47547 3929
24583e45 3930 scoped_restore_current_thread restore_thread;
984a2c04 3931
24583e45 3932 switch_to_thread (thread);
7582c77c 3933 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
fa5308bd 3934
a0ff9e1a 3935 for (CORE_ADDR pc : next_pcs)
3b9a79ef 3936 set_single_step_breakpoint (pc, current_ptid);
fa5308bd
AT
3937}
3938
df95181f
TBA
3939int
3940linux_process_target::single_step (lwp_info* lwp)
7fe5e27e
AT
3941{
3942 int step = 0;
3943
b31cdfa6 3944 if (supports_hardware_single_step ())
7fe5e27e
AT
3945 {
3946 step = 1;
3947 }
7582c77c 3948 else if (supports_software_single_step ())
7fe5e27e
AT
3949 {
3950 install_software_single_step_breakpoints (lwp);
3951 step = 0;
3952 }
3953 else
c058728c 3954 threads_debug_printf ("stepping is not implemented on this target");
7fe5e27e
AT
3955
3956 return step;
3957}
3958
/* The signal can be delivered to the inferior if we are not trying to
   finish a fast tracepoint collect.  Since signal can be delivered in
   the step-over, the program may go to signal handler and trap again
   after return from the signal handler.  We can live with the spurious
   double traps.  */

static int
lwp_signal_can_be_delivered (struct lwp_info *lwp)
{
  return (lwp->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting);
}
3971
/* Resume execution of LWP, stepping it if STEP is nonzero, and
   delivering SIGNAL (with sibling siginfo INFO, may be NULL) if it
   can be delivered now; otherwise the signal is queued.  Throws on
   ptrace errors (see resume_one_lwp for the non-throwing wrapper).  */

void
linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
					    int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int ptrace_request;
  struct process_info *proc = get_thread_process (thread);

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || !lwp->pending_signals.empty ()
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Not resuming lwp %ld (%s, stop %s); has pending status",
	 lwpid_of (thread), step ? "step" : "continue",
	 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      threads_debug_printf ("  pending reinsert at 0x%s",
			    paddress (lwp->bp_reinsert));

      if (supports_hardware_single_step ())
	{
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    threads_debug_printf
      ("lwp %ld wants to get out of fast tracepoint jump pad "
       "(exit-jump-pad-bkpt)", lwpid_of (thread));

  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      threads_debug_printf
	("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
	 lwpid_of (thread));

      if (supports_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error ("moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      threads_debug_printf
	("lwp %ld has a while-stepping action -> forcing step.",
	 lwpid_of (thread));

      step = single_step (lwp);
    }

  if (proc->tdesc != NULL && low_supports_breakpoints ())
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      lwp->stop_pc = low_get_pc (regcache);

      threads_debug_printf ("  %s from pc 0x%lx", step ? "step" : "continue",
			    (long) lwp->stop_pc);
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
    {
      const pending_signal &p_sig = lwp->pending_signals.front ();

      signal = p_sig.signal;
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals.pop_front ();
    }

  threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
			lwpid_of (thread), step ? "step" : "continue", signal,
			lwp->stop_expected ? "expected" : "not expected");

  low_prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  if (errno)
    {
      int saved_errno = errno;

      threads_debug_printf ("ptrace errno = %d (%s)",
			    saved_errno, strerror (saved_errno));

      errno = saved_errno;
      perror_with_name ("resuming thread");
    }

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
4167
/* Default implementation of the low-target hook called just before an
   LWP is resumed; architecture backends override this to e.g. write
   back debug registers.  The base version does nothing.  */

void
linux_process_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* Nop.  */
}
4173
/* Called when we try to resume a stopped LWP and that errors out.  If
   the LWP is no longer in ptrace-stopped state (meaning it's zombie,
   or about to become), discard the error, clear any pending status
   the LWP may have, and return true (we'll collect the exit status
   soon enough).  Otherwise, return false.  */

static int
check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
{
  struct thread_info *thread = get_lwp_thread (lp);

  /* If we get an error after resuming the LWP successfully, we'd
     confuse !T state for the LWP being gone.  */
  gdb_assert (lp->stopped);

  /* We can't just check whether the LWP is in 'Z (Zombie)' state,
     because even if ptrace failed with ESRCH, the tracee may be "not
     yet fully dead", but already refusing ptrace requests.  In that
     case the tracee has 'R (Running)' state for a little bit
     (observed in Linux 3.18).  See also the note on ESRCH in the
     ptrace(2) man page.  Instead, check whether the LWP has any state
     other than ptrace-stopped.  */

  /* Don't assume anything if /proc/PID/status can't be read.  */
  if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
    {
      lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
      lp->status_pending_p = 0;
      return 1;
    }
  return 0;
}
4206
/* Like resume_one_lwp_throw, but swallows the error if the LWP turns
   out to be gone already (zombie or refusing ptrace); in that case
   the LWP is marked running so its exit event can be collected.  */

void
linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
				      siginfo_t *info)
{
  try
    {
      resume_one_lwp_throw (lwp, step, signal, info);
    }
  catch (const gdb_exception_error &ex)
    {
      if (check_ptrace_stopped_lwp_gone (lwp))
	{
	  /* This could because we tried to resume an LWP after its leader
	     exited.  Mark it as resumed, so we can collect an exit event
	     from it.  */
	  lwp->stopped = 0;
	  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
      else
	throw;
    }
}
4229
5fdda392
SM
4230/* This function is called once per thread via for_each_thread.
4231 We look up which resume request applies to THREAD and mark it with a
4232 pointer to the appropriate resume request.
5544ad89
DJ
4233
4234 This algorithm is O(threads * resume elements), but resume elements
4235 is small (and will remain small at least until GDB supports thread
4236 suspension). */
ebcf782c 4237
5fdda392
SM
4238static void
4239linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
0d62e5e8 4240{
d86d4aaf 4241 struct lwp_info *lwp = get_thread_lwp (thread);
64386c31 4242
5fdda392 4243 for (int ndx = 0; ndx < n; ndx++)
95954743 4244 {
5fdda392 4245 ptid_t ptid = resume[ndx].thread;
d7e15655 4246 if (ptid == minus_one_ptid
9c80ecd6 4247 || ptid == thread->id
0c9070b3
YQ
4248 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4249 of PID'. */
e99b03dc 4250 || (ptid.pid () == pid_of (thread)
0e998d96 4251 && (ptid.is_pid ()
e38504b3 4252 || ptid.lwp () == -1)))
95954743 4253 {
5fdda392 4254 if (resume[ndx].kind == resume_stop
8336d594 4255 && thread->last_resume_kind == resume_stop)
d50171e4 4256 {
c058728c
SM
4257 threads_debug_printf
4258 ("already %s LWP %ld at GDB's request",
4259 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
4260 ? "stopped" : "stopping"),
4261 lwpid_of (thread));
d50171e4
PA
4262
4263 continue;
4264 }
4265
5a04c4cf
PA
4266 /* Ignore (wildcard) resume requests for already-resumed
4267 threads. */
5fdda392 4268 if (resume[ndx].kind != resume_stop
5a04c4cf
PA
4269 && thread->last_resume_kind != resume_stop)
4270 {
c058728c
SM
4271 threads_debug_printf
4272 ("already %s LWP %ld at GDB's request",
4273 (thread->last_resume_kind == resume_step
4274 ? "stepping" : "continuing"),
4275 lwpid_of (thread));
5a04c4cf
PA
4276 continue;
4277 }
4278
393a6b59
PA
4279 /* Don't let wildcard resumes resume fork/vfork/clone
4280 children that GDB does not yet know are new children. */
4281 if (lwp->relative != NULL)
5a04c4cf 4282 {
393a6b59 4283 struct lwp_info *rel = lwp->relative;
5a04c4cf
PA
4284
4285 if (rel->status_pending_p
393a6b59 4286 && is_new_child_status (rel->waitstatus.kind ()))
5a04c4cf 4287 {
c058728c
SM
4288 threads_debug_printf
4289 ("not resuming LWP %ld: has queued stop reply",
4290 lwpid_of (thread));
5a04c4cf
PA
4291 continue;
4292 }
4293 }
4294
4295 /* If the thread has a pending event that has already been
4296 reported to GDBserver core, but GDB has not pulled the
4297 event out of the vStopped queue yet, likewise, ignore the
4298 (wildcard) resume request. */
9c80ecd6 4299 if (in_queued_stop_replies (thread->id))
5a04c4cf 4300 {
c058728c
SM
4301 threads_debug_printf
4302 ("not resuming LWP %ld: has queued stop reply",
4303 lwpid_of (thread));
5a04c4cf
PA
4304 continue;
4305 }
4306
5fdda392 4307 lwp->resume = &resume[ndx];
8336d594 4308 thread->last_resume_kind = lwp->resume->kind;
fa593d66 4309
c2d6af84
PA
4310 lwp->step_range_start = lwp->resume->step_range_start;
4311 lwp->step_range_end = lwp->resume->step_range_end;
4312
fa593d66
PA
4313 /* If we had a deferred signal to report, dequeue one now.
4314 This can happen if LWP gets more than one signal while
4315 trying to get out of a jump pad. */
4316 if (lwp->stopped
4317 && !lwp->status_pending_p
4318 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4319 {
4320 lwp->status_pending_p = 1;
4321
c058728c
SM
4322 threads_debug_printf
4323 ("Dequeueing deferred signal %d for LWP %ld, "
4324 "leaving status pending.",
4325 WSTOPSIG (lwp->status_pending),
4326 lwpid_of (thread));
fa593d66
PA
4327 }
4328
5fdda392 4329 return;
95954743
PA
4330 }
4331 }
2bd7c093
PA
4332
4333 /* No resume action for this thread. */
4334 lwp->resume = NULL;
5544ad89
DJ
4335}
4336
df95181f
TBA
4337bool
4338linux_process_target::resume_status_pending (thread_info *thread)
5544ad89 4339{
d86d4aaf 4340 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4341
bd99dc85
PA
4342 /* LWPs which will not be resumed are not interesting, because
4343 we might not wait for them next time through linux_wait. */
2bd7c093 4344 if (lwp->resume == NULL)
25c28b4d 4345 return false;
64386c31 4346
df95181f 4347 return thread_still_has_status_pending (thread);
d50171e4
PA
4348}
4349
/* Return true if THREAD is stopped at a breakpoint (or fast
   tracepoint jump) that GDBserver itself must step over before the
   thread can be resumed; false for threads that won't be resumed,
   are suspended, have pending status, or whose PC moved since the
   stop.  */

bool
linux_process_target::thread_needs_step_over (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
			    lwpid_of (thread));
      return false;
    }

  if (thread->last_resume_kind == resume_stop)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, should remain stopped",
	 lwpid_of (thread));
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
			    lwpid_of (thread));
      return false;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending status.",
	 lwpid_of (thread));
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Cancelling, PC was changed.  "
	 "Old stop_pc was 0x%s, PC is now 0x%s", lwpid_of (thread),
	 paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (supports_software_single_step ()
      && !lwp->pending_signals.empty ()
      && lwp_signal_can_be_delivered (lwp))
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending signals.",
	 lwpid_of (thread));

      return false;
    }

  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
				" GDB breakpoint at 0x%s; skipping step over",
				lwpid_of (thread), paddress (pc));

	  return false;
	}
      else
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, "
				"found breakpoint at 0x%s",
				lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  return true;
	}
    }

  threads_debug_printf
    ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
     lwpid_of (thread), paddress (pc));

  return false;
}
4465
/* Begin stepping LWP over the breakpoint at its current PC: stop and
   suspend all other threads, remove the breakpoint, and resume LWP
   for one step.  STEP_OVER_BKPT records which LWP we expect the next
   event from; finish_step_over completes the operation.  */

void
linux_process_target::start_step_over (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  CORE_ADDR pc;

  threads_debug_printf ("Starting step-over on LWP %ld.  Stopping all threads",
			lwpid_of (thread));

  stop_all_lwps (1, lwp);

  if (lwp->suspended != 0)
    {
      internal_error ("LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  threads_debug_printf ("Done stopping all threads for step-over.");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  bool step = false;
  {
    scoped_restore_current_thread restore_thread;
    switch_to_thread (thread);

    /* Remember where to re-insert the breakpoint once the step is
       done, and lift it (and any fast tracepoint jump) out of the
       way.  */
    lwp->bp_reinsert = pc;
    uninsert_breakpoints_at (pc);
    uninsert_fast_tracepoint_jumps_at (pc);

    step = single_step (lwp);
  }

  resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->id;
}
4508
/* Complete a step-over started by start_step_over: re-insert the
   breakpoint that was lifted and clean up single-step state.  Returns
   true if a step-over was in fact pending for LWP, false otherwise.  */

bool
linux_process_target::finish_step_over (lwp_info *lwp)
{
  if (lwp->bp_reinsert != 0)
    {
      scoped_restore_current_thread restore_thread;

      threads_debug_printf ("Finished step over.");

      switch_to_thread (get_lwp_thread (lwp));

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
	 may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any single-step breakpoints.  No longer needed.  We
	 don't have to worry about other threads hitting this trap,
	 and later not being able to explain it, because we were
	 stepping over a breakpoint, and we hold all threads but
	 LWP stopped while doing that.  */
      if (!supports_hardware_single_step ())
	{
	  gdb_assert (has_single_step_breakpoints (current_thread));
	  delete_single_step_breakpoints (current_thread);
	}

      step_over_bkpt = null_ptid;
      return true;
    }
  else
    return false;
}
4544
/* If a step-over is in progress (step_over_bkpt set), drain all
   pending events, finish the step-over for the stepping LWP, discard
   the step SIGTRAP unless the client explicitly asked for a step, and
   unsuspend all LWPs.  Used on detach.  */

void
linux_process_target::complete_ongoing_step_over ()
{
  if (step_over_bkpt != null_ptid)
    {
      struct lwp_info *lwp;
      int wstat;
      int ret;

      threads_debug_printf ("detach: step over in progress, finish it first");

      /* Passing NULL_PTID as filter indicates we want all events to
	 be left pending.  Eventually this returns when there are no
	 unwaited-for children left.  */
      ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
				     __WALL);
      gdb_assert (ret == -1);

      lwp = find_lwp_pid (step_over_bkpt);
      if (lwp != NULL)
	{
	  finish_step_over (lwp);

	  /* If we got our step SIGTRAP, don't leave it pending,
	     otherwise we would report it to GDB as a spurious
	     SIGTRAP.  */
	  gdb_assert (lwp->status_pending_p);
	  if (WIFSTOPPED (lwp->status_pending)
	      && WSTOPSIG (lwp->status_pending) == SIGTRAP)
	    {
	      thread_info *thread = get_lwp_thread (lwp);
	      if (thread->last_resume_kind != resume_step)
		{
		  threads_debug_printf ("detach: discard step-over SIGTRAP");

		  lwp->status_pending_p = 0;
		  lwp->status_pending = 0;
		  resume_one_lwp (lwp, lwp->stepping, 0, NULL);
		}
	      else
		threads_debug_printf
		  ("detach: resume_step, not discarding step-over SIGTRAP");
	    }
	}
      step_over_bkpt = null_ptid;
      unsuspend_all_lwps (lwp);
    }
}
4593
/* Carry out THREAD's resume request: for resume_stop requests, make
   sure a SIGSTOP is pending (or rely on an already-pending signal);
   otherwise enqueue any requested signal and either proceed the LWP
   or leave it stopped if LEAVE_ALL_STOPPED, it is suspended, or it
   has a pending status to report.  */

void
linux_process_target::resume_one_thread (thread_info *thread,
					 bool leave_all_stopped)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int leave_pending;

  /* No resume request recorded for this thread.  */
  if (lwp->resume == NULL)
    return;

  if (lwp->resume->kind == resume_stop)
    {
      threads_debug_printf ("resume_stop request for LWP %ld",
			    lwpid_of (thread));

      if (!lwp->stopped)
	{
	  threads_debug_printf ("stopping LWP %ld", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  threads_debug_printf ("already stopped LWP %ld", lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report.empty ())
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.set_ignore ();
      return;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume it - we can just report the pending status.
     Likewise if it is suspended, because e.g., another thread is
     stepping past a breakpoint.  Make sure to queue any signals that
     would otherwise be sent.  In all-stop mode, we do this decision
     based on if *any* thread has a pending status.  If there's a
     thread that needs the step-over-breakpoint dance, then don't
     resume any other thread but that particular one.  */
  leave_pending = (lwp->suspended
		   || lwp->status_pending_p
		   || leave_all_stopped);

  /* If we have a new signal, enqueue the signal.  */
  if (lwp->resume->sig != 0)
    {
      siginfo_t info, *info_p;

      /* If this is the same signal we were previously stopped by,
	 make sure to queue its siginfo.  */
      if (WIFSTOPPED (lwp->last_status)
	  && WSTOPSIG (lwp->last_status) == lwp->resume->sig
	  && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
		     (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
    }

  if (!leave_pending)
    {
      threads_debug_printf ("resuming LWP %ld", lwpid_of (thread));

      proceed_one_lwp (thread, NULL);
    }
  else
    threads_debug_printf ("leaving LWP %ld stopped", lwpid_of (thread));

  thread->last_status.set_ignore ();
  lwp->resume = NULL;
}
4691
/* The resume target op.  Record the resume requests on all threads,
   then either start a step-over (holding everything else stopped),
   leave everything stopped if some thread has a pending status to
   report (all-stop), or resume all threads.  */

void
linux_process_target::resume (thread_resume *resume_info, size_t n)
{
  struct thread_info *need_step_over = NULL;

  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  /* Match each thread against RESUME_INFO and record its request.  */
  for_each_thread ([&] (thread_info *thread)
    {
      linux_set_resume_request (thread, resume_info, n);
    });

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  bool any_pending = false;
  if (!non_stop)
    any_pending = find_thread ([this] (thread_info *thread)
		    {
		      return resume_status_pending (thread);
		    }) != nullptr;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && low_supports_breakpoints ())
    need_step_over = find_thread ([this] (thread_info *thread)
		       {
			 return thread_needs_step_over (thread);
		       });

  bool leave_all_stopped = (need_step_over != NULL || any_pending);

  if (need_step_over != NULL)
    threads_debug_printf ("Not resuming all, need step over");
  else if (any_pending)
    threads_debug_printf ("Not resuming, all-stop and found "
			  "an LWP with pending status");
  else
    threads_debug_printf ("Resuming, no pending status or step over needed");

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  for_each_thread ([&] (thread_info *thread)
    {
      resume_one_thread (thread, leave_all_stopped);
    });

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  /* We may have events that were pending that can/should be sent to
     the client now.  Trigger a linux_wait call.  */
  if (target_is_async_p ())
    async_file_mark ();
}
4754
/* Resume THREAD, unless it is EXCEPT, already running, suspended,
   has a pending status, or the client asked it to remain stopped.
   Chooses whether to single-step based on the client's request and
   any pending breakpoint reinsertion.  */

void
linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  if (lwp == except)
    return;

  threads_debug_printf ("lwp %ld", lwpid_of (thread));

  if (!lwp->stopped)
    {
      threads_debug_printf ("   LWP %ld already running", lwpid_of (thread));
      return;
    }

  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
    {
      threads_debug_printf ("   client wants LWP to remain %ld stopped",
			    lwpid_of (thread));
      return;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf ("   LWP %ld has pending status, leaving stopped",
			    lwpid_of (thread));
      return;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      threads_debug_printf ("   LWP %ld is suspended", lwpid_of (thread));
      return;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report.empty ()
      && (lwp->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting))
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      threads_debug_printf
	("Client wants LWP %ld to stop.  Making sure it has a SIGSTOP pending",
	 lwpid_of (thread));

      send_sigstop (lwp);
    }

  if (thread->last_resume_kind == resume_step)
    {
      threads_debug_printf ("   stepping LWP %ld, client wants it stepping",
			    lwpid_of (thread));

      /* If resume_step is requested by GDB, install single-step
	 breakpoints when the thread is about to be actually resumed if
	 the single-step breakpoints weren't removed.  */
      if (supports_software_single_step ()
	  && !has_single_step_breakpoints (thread))
	install_software_single_step_breakpoints (lwp);

      step = maybe_hw_step (thread);
    }
  else if (lwp->bp_reinsert != 0)
    {
      threads_debug_printf ("   stepping LWP %ld, reinsert set",
			    lwpid_of (thread));

      step = maybe_hw_step (thread);
    }
  else
    step = 0;

  resume_one_lwp (lwp, step, 0, NULL);
}
4843
df95181f
TBA
4844void
4845linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4846 lwp_info *except)
7984d532 4847{
d86d4aaf 4848 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
4849
4850 if (lwp == except)
e2b44075 4851 return;
7984d532 4852
863d01bd 4853 lwp_suspended_decr (lwp);
7984d532 4854
e2b44075 4855 proceed_one_lwp (thread, except);
d50171e4
PA
4856}
4857
/* Resume all stopped LWPs, unless one of them needs a step-over, in
   which case start that step-over instead (with all other threads
   held stopped).  */

void
linux_process_target::proceed_all_lwps ()
{
  struct thread_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (low_supports_breakpoints ())
    {
      need_step_over = find_thread ([this] (thread_info *thread)
		         {
			   return thread_needs_step_over (thread);
			 });

      if (need_step_over != NULL)
	{
	  threads_debug_printf ("found thread %ld needing a step-over",
				lwpid_of (need_step_over));

	  start_step_over (get_thread_lwp (need_step_over));
	  return;
	}
    }

  threads_debug_printf ("Proceeding, no step-over needed");

  for_each_thread ([this] (thread_info *thread)
    {
      proceed_one_lwp (thread, NULL);
    });
}
4892
d16f3f6c
TBA
4893void
4894linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
d50171e4 4895{
c058728c
SM
4896 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4897
4898 if (except)
4899 threads_debug_printf ("except=(LWP %ld)",
4900 lwpid_of (get_lwp_thread (except)));
4901 else
4902 threads_debug_printf ("except=nullptr");
5544ad89 4903
7984d532 4904 if (unsuspend)
e2b44075
SM
4905 for_each_thread ([&] (thread_info *thread)
4906 {
4907 unsuspend_and_proceed_one_lwp (thread, except);
4908 });
7984d532 4909 else
e2b44075
SM
4910 for_each_thread ([&] (thread_info *thread)
4911 {
4912 proceed_one_lwp (thread, except);
4913 });
0d62e5e8
DJ
4914}
4915
58caa3dc
DJ
4916
4917#ifdef HAVE_LINUX_REGSETS
4918
1faeff08
MR
4919#define use_linux_regsets 1
4920
030031ee
PA
4921/* Returns true if REGSET has been disabled. */
4922
4923static int
4924regset_disabled (struct regsets_info *info, struct regset_info *regset)
4925{
4926 return (info->disabled_regsets != NULL
4927 && info->disabled_regsets[regset - info->regsets]);
4928}
4929
4930/* Disable REGSET. */
4931
4932static void
4933disable_regset (struct regsets_info *info, struct regset_info *regset)
4934{
4935 int dr_offset;
4936
4937 dr_offset = regset - info->regsets;
4938 if (info->disabled_regsets == NULL)
224c3ddb 4939 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
030031ee
PA
4940 info->disabled_regsets[dr_offset] = 1;
4941}
4942
/* Fetch every enabled regset in REGSETS_INFO from the inferior into
   REGCACHE via ptrace.  Returns 0 if a general-registers regset was
   read, 1 otherwise, so the caller knows whether the usrregs
   fallback still has to fetch everything individually.  */

static int
regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || regset_disabled (regsets_info, regset))
	continue;

      buf = xmalloc (regset->size);

      /* Regsets with an NT_* type go through PTRACE_GETREGSET, which
	 takes a struct iovec describing the buffer.  */
      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ENODATA)
	    {
	      /* ENODATA may be returned if the regset is currently
		 not "active".  This can happen in normal operation,
		 so suppress the warning in this case.  */
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to read its registers.  */
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else
	{
	  if (regset->type == GENERAL_REGS)
	    saw_general_regs = 1;
	  regset->store_function (regcache, buf);
	}
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5021
/* Write every enabled, writable regset in REGSETS_INFO from REGCACHE
   back to the inferior via ptrace, using a read-modify-write so that
   kernel regset fields not tracked by the regcache are preserved.
   Returns 0 if a general-registers regset was written, 1 otherwise.  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip read-only regsets (no fill_function).  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5110
1faeff08 5111#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5112
1faeff08 5113#define use_linux_regsets 0
3aee8918
PA
5114#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5115#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5116
58caa3dc 5117#endif
1faeff08
MR
5118
5119/* Return 1 if register REGNO is supported by one of the regset ptrace
5120 calls or 0 if it has to be transferred individually. */
5121
5122static int
3aee8918 5123linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5124{
5125 unsigned char mask = 1 << (regno % 8);
5126 size_t index = regno / 8;
5127
5128 return (use_linux_regsets
3aee8918
PA
5129 && (regs_info->regset_bitmap == NULL
5130 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5131}
5132
58caa3dc 5133#ifdef HAVE_LINUX_USRREGS
1faeff08 5134
5b3da067 5135static int
3aee8918 5136register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5137{
5138 int addr;
5139
3aee8918 5140 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5141 error ("Invalid register number %d.", regnum);
5142
3aee8918 5143 addr = usrregs->regmap[regnum];
1faeff08
MR
5144
5145 return addr;
5146}
5147
/* Fetch register REGNO from the inferior one word at a time with
   PTRACE_PEEKUSER and supply it to REGCACHE.  Silently does nothing
   for registers outside USRREGS or ones the low target cannot
   fetch; supplies NULL (unavailable) if ptrace fails mid-read.  */

void
linux_process_target::fetch_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_fetch_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	{
	  /* Mark register REGNO unavailable.  */
	  supply_register (regcache, regno, NULL);
	  return;
	}
    }

  low_supply_ptrace_register (regcache, regno, buf);
}
5192
/* Write register REGNO from REGCACHE into the inferior one word at a
   time with PTRACE_POKEUSER.  ESRCH (process gone) is ignored; other
   ptrace errors raise an error unless the low target declares the
   register unstorable.  */

void
linux_process_target::store_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_store_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);
  memset (buf, 0, size);

  low_collect_ptrace_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;


	  if (!low_cannot_store_register (regno))
	    error ("writing register %d: %s", regno, safe_strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
daca57a7 5244#endif /* HAVE_LINUX_USRREGS */
1faeff08 5245
/* Default implementation: collect REGNO from REGCACHE into BUF with
   no arch-specific conversion.  Low targets may override.  */

void
linux_process_target::low_collect_ptrace_register (regcache *regcache,
						   int regno, char *buf)
{
  collect_register (regcache, regno, buf);
}
5252
/* Default implementation: supply BUF to REGCACHE as register REGNO
   with no arch-specific conversion.  Low targets may override.  */

void
linux_process_target::low_supply_ptrace_register (regcache *regcache,
						  int regno, const char *buf)
{
  supply_register (regcache, regno, buf);
}
5259
/* Fetch registers via PTRACE_PEEKUSER.  REGNO of -1 means all
   registers; then, if ALL is zero, only registers not covered by a
   regset are fetched.  No-op when HAVE_LINUX_USRREGS is undefined.  */

void
linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
						    regcache *regcache,
						    int regno, int all)
{
#ifdef HAVE_LINUX_USRREGS
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
	if (all || !linux_register_in_regsets (regs_info, regno))
	  fetch_register (usr, regcache, regno);
    }
  else
    fetch_register (usr, regcache, regno);
#endif
}
5278
/* Store registers via PTRACE_POKEUSER.  REGNO of -1 means all
   registers; then, if ALL is zero, only registers not covered by a
   regset are stored.  No-op when HAVE_LINUX_USRREGS is undefined.  */

void
linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
						    regcache *regcache,
						    int regno, int all)
{
#ifdef HAVE_LINUX_USRREGS
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
	if (all || !linux_register_in_regsets (regs_info, regno))
	  store_register (usr, regcache, regno);
    }
  else
    store_register (usr, regcache, regno);
#endif
}
1faeff08 5297
/* The fetch_registers target op.  Try the low target's special
   fetcher first, then regsets, then fall back to PTRACE_PEEKUSER for
   whatever the regsets did not cover.  REGNO of -1 fetches all.  */

void
linux_process_target::fetch_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      if (regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  low_fetch_register (regcache, regno);

      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      /* The low target may handle this register entirely itself.  */
      if (low_fetch_register (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5328
/* The store_registers target op.  Write through regsets first, then
   fall back to PTRACE_POKEUSER for whatever the regsets did not
   cover.  REGNO of -1 stores all registers.  */

void
linux_process_target::store_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      all = regsets_store_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5353
/* Default implementation: the low target has no special way to fetch
   REGNO; return false so the generic regset/usrregs paths are used.  */

bool
linux_process_target::low_fetch_register (regcache *regcache, int regno)
{
  return false;
}
da6d8c04 5359
/* A wrapper for the read_memory target op.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  return the_target->read_memory (memaddr, myaddr, len);
}
5367
e2558df3 5368
/* Helper for read_memory/write_memory using /proc/PID/mem.  Because
   we can use a single read/write call, this can be much more
   efficient than banging away at PTRACE_PEEKTEXT.  Also, unlike
   PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running threads.
   One and only one of READBUF and WRITEBUF is non-null.  If READBUF is
   not null, then we're reading, otherwise we're writing.  Returns 0 on
   success, an errno value otherwise.  */

static int
proc_xfer_memory (CORE_ADDR memaddr, unsigned char *readbuf,
		  const gdb_byte *writebuf, int len)
{
  gdb_assert ((readbuf == nullptr) != (writebuf == nullptr));

  process_info *proc = current_process ();

  int fd = proc->priv->mem_fd;
  if (fd == -1)
    return EIO;

  /* Transfer in a loop: a short read/write just advances the cursor
     and retries the remainder.  */
  while (len > 0)
    {
      int bytes;

      /* Use pread64/pwrite64 if available, since they save a syscall
	 and can handle 64-bit offsets even on 32-bit platforms (for
	 instance, SPARC debugging a SPARC64 application).  But only
	 use them if the offset isn't so high that when cast to off_t
	 it'd be negative, as seen on SPARC64.  pread64/pwrite64
	 outright reject such offsets.  lseek does not.  */
#ifdef HAVE_PREAD64
      if ((off_t) memaddr >= 0)
	bytes = (readbuf != nullptr
		 ? pread64 (fd, readbuf, len, memaddr)
		 : pwrite64 (fd, writebuf, len, memaddr));
      else
#endif
	{
	  bytes = -1;
	  if (lseek (fd, memaddr, SEEK_SET) != -1)
	    bytes = (readbuf != nullptr
		     ? read (fd, readbuf, len)
		     : write (fd, writebuf, len));
	}

      if (bytes < 0)
	return errno;
      else if (bytes == 0)
	{
	  /* EOF means the address space is gone, the whole process
	     exited or execed.  */
	  return EIO;
	}

      memaddr += bytes;
      if (readbuf != nullptr)
	readbuf += bytes;
      else
	writebuf += bytes;
      len -= bytes;
    }

  return 0;
}
c3e735a6 5432
/* The read_memory target op: read LEN bytes at MEMADDR into MYADDR
   via /proc/PID/mem.  Returns 0 on success, an errno value otherwise.  */

int
linux_process_target::read_memory (CORE_ADDR memaddr,
				   unsigned char *myaddr, int len)
{
  return proc_xfer_memory (memaddr, myaddr, nullptr, len);
}
5439
/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
   memory at MEMADDR.  On failure (cannot write to the inferior)
   returns the value of errno.  Always succeeds if LEN is zero.  */

int
linux_process_target::write_memory (CORE_ADDR memaddr,
				    const unsigned char *myaddr, int len)
{
  if (debug_threads)
    {
      /* Dump up to four bytes.  */
      char str[4 * 2 + 1];
      char *p = str;
      int dump = len < 4 ? len : 4;

      for (int i = 0; i < dump; i++)
	{
	  sprintf (p, "%02x", myaddr[i]);
	  p += 2;
	}
      *p = '\0';

      threads_debug_printf ("Writing %s to 0x%08lx in process %d",
			    str, (long) memaddr, current_process ()->pid);
    }

  return proc_xfer_memory (memaddr, nullptr, myaddr, len);
}
2f2893d9 5468
/* The look_up_symbols target op.  Initialize libthread_db support for
   the current process, once, when built with USE_THREAD_DB.  */

void
linux_process_target::look_up_symbols ()
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process.  */
  if (proc->priv->thread_db != NULL)
    return;

  thread_db_init ();
#endif
}
5481
eb497a2a
TBA
/* Interrupt the inferior on behalf of GDB (e.g. the user pressed ^C
   on the GDB side).  */

void
linux_process_target::request_interrupt ()
{
  /* Send a SIGINT to the process group.  This acts just like the user
     typed a ^C on the controlling terminal.  */
  int res = ::kill (-signal_pid, SIGINT);
  if (res == -1)
    warning (_("Sending SIGINT to process group of pid %ld failed: %s"),
	     signal_pid, safe_strerror (errno));
}
5492
eac215cc
TBA
/* Implement the supports_read_auxv target_ops method.  */

bool
linux_process_target::supports_read_auxv ()
{
  return true;
}
5498
aa691b87
RM
/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
   to debugger memory starting at MYADDR.  Reads via /proc/PID/auxv;
   returns the number of bytes read, or -1 on error.  */

int
linux_process_target::read_auxv (int pid, CORE_ADDR offset,
				 unsigned char *myaddr, unsigned int len)
{
  char filename[PATH_MAX];
  int fd, n;

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;

  /* Seek only when a non-zero offset was requested; a failed seek is
     reported as an error rather than reading from the wrong place.  */
  if (offset != (CORE_ADDR) 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    n = -1;
  else
    n = read (fd, myaddr, len);

  close (fd);

  return n;
}
5525
7e0bde70
TBA
/* Insert a breakpoint or watchpoint of raw type TYPE at ADDR/SIZE.
   Software breakpoints are handled generically by poking the memory;
   everything else is delegated to the architecture low target.  */

int
linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
				    int size, raw_breakpoint *bp)
{
  if (type == raw_bkpt_type_sw)
    return insert_memory_breakpoint (bp);
  else
    return low_insert_point (type, addr, size, bp);
}
5535
/* Default low-target implementation for inserting a hardware
   breakpoint/watchpoint; overridden by architectures that support
   them.  */

int
linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}
5543
7e0bde70
TBA
/* Remove a breakpoint or watchpoint of raw type TYPE at ADDR/SIZE;
   the mirror of insert_point.  */

int
linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
				    int size, raw_breakpoint *bp)
{
  if (type == raw_bkpt_type_sw)
    return remove_memory_breakpoint (bp);
  else
    return low_remove_point (type, addr, size, bp);
}
5553
/* Default low-target implementation for removing a hardware
   breakpoint/watchpoint; overridden by architectures that support
   them.  */

int
linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}
5561
/* Implement the stopped_by_sw_breakpoint target_ops
   method.  Reports whether the current LWP's last stop was caused by
   a software breakpoint.  */

bool
linux_process_target::stopped_by_sw_breakpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
}
5572
/* Implement the supports_stopped_by_sw_breakpoint target_ops
   method.  Accurate stop reasons require SIGTRAP siginfo support.  */

bool
linux_process_target::supports_stopped_by_sw_breakpoint ()
{
  return USE_SIGTRAP_SIGINFO;
}
5581
/* Implement the stopped_by_hw_breakpoint target_ops
   method.  Reports whether the current LWP's last stop was caused by
   a hardware breakpoint.  */

bool
linux_process_target::stopped_by_hw_breakpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
}
5592
/* Implement the supports_stopped_by_hw_breakpoint target_ops
   method.  Accurate stop reasons require SIGTRAP siginfo support.  */

bool
linux_process_target::supports_stopped_by_hw_breakpoint ()
{
  return USE_SIGTRAP_SIGINFO;
}
5601
/* Implement the supports_hardware_single_step target_ops method.  */

bool
linux_process_target::supports_hardware_single_step ()
{
  return true;
}
5609
6eeb5c55
TBA
/* Report whether the current LWP's last stop was caused by a
   watchpoint hit.  */

bool
linux_process_target::stopped_by_watchpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}
5617
6eeb5c55
TBA
/* Return the data address that triggered the watchpoint the current
   LWP stopped for (only meaningful after stopped_by_watchpoint).  */

CORE_ADDR
linux_process_target::stopped_data_address ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stopped_data_address;
}
5625
db0dfaa0
LM
/* This is only used for targets that define PT_TEXT_ADDR,
   PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
   the target has different ways of acquiring this information, like
   loadmaps.  */

bool
linux_process_target::supports_read_offsets ()
{
#ifdef SUPPORTS_READ_OFFSETS
  return true;
#else
  return false;
#endif
}
5640
52fb6437
NS
/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Returns 1 on success with *TEXT_P/*DATA_P filled
   in, 0 on failure.  */

int
linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#ifdef SUPPORTS_READ_OFFSETS
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  /* PTRACE_PEEKUSER reports errors only through errno, so clear it
     first and test it once after all three peeks.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
#else
  gdb_assert_not_reached ("target op read_offsets not supported");
#endif
}
52fb6437 5681
6e3fd7e9
TBA
/* TLS address lookup is only available via libthread_db.  */

bool
linux_process_target::supports_get_tls_address ()
{
#ifdef USE_THREAD_DB
  return true;
#else
  return false;
#endif
}
5691
/* Look up the address of a thread-local variable: OFFSET within the
   TLS block of LOAD_MODULE for THREAD, storing the result in
   *ADDRESS.  Delegates to libthread_db; returns -1 when built without
   it.  */

int
linux_process_target::get_tls_address (thread_info *thread,
				       CORE_ADDR offset,
				       CORE_ADDR load_module,
				       CORE_ADDR *address)
{
#ifdef USE_THREAD_DB
  return thread_db_get_tls_address (thread, offset, load_module, address);
#else
  return -1;
#endif
}
5704
2d0795ee
TBA
/* Implement the supports_qxfer_osdata target_ops method.  */

bool
linux_process_target::supports_qxfer_osdata ()
{
  return true;
}
5710
/* Handle qXfer:osdata:read.  The OS data object is read-only, so
   WRITEBUF is ignored by the shared implementation.  */

int
linux_process_target::qxfer_osdata (const char *annex,
				    unsigned char *readbuf,
				    unsigned const char *writebuf,
				    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
5719
cb63de7c
TBA
5720void
5721linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5722 gdb_byte *inf_siginfo, int direction)
d0722149 5723{
cb63de7c 5724 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
d0722149
DE
5725
5726 /* If there was no callback, or the callback didn't do anything,
5727 then just do a straight memcpy. */
5728 if (!done)
5729 {
5730 if (direction == 1)
a5362b9a 5731 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 5732 else
a5362b9a 5733 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
5734 }
5735}
5736
cb63de7c
TBA
/* Default low-target siginfo conversion hook: no architecture
   specific conversion performed.  */

bool
linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
					 int direction)
{
  return false;
}
5743
d7abedf7
TBA
/* Implement the supports_qxfer_siginfo target_ops method.  */

bool
linux_process_target::supports_qxfer_siginfo ()
{
  return true;
}
5749
/* Handle qXfer:siginfo:read/write: transfer up to LEN bytes at OFFSET
   of the current thread's siginfo, reading into READBUF or writing
   from WRITEBUF.  Returns the number of bytes transferred, or -1 on
   error.  */

int
linux_process_target::qxfer_siginfo (const char *annex,
				     unsigned char *readbuf,
				     unsigned const char *writebuf,
				     CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  threads_debug_printf ("%s siginfo for lwp %d.",
			readbuf != NULL ? "Reading" : "Writing",
			pid);

  if (offset >= sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the size of the siginfo object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
5799
bd99dc85
PA
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.  Only async-signal-safe operations are
   performed here, and errno is preserved for the interrupted code.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_threads)
    {
      do
	{
	  /* Use the async signal safe debug function.  */
	  if (debug_write ("sigchld_handler\n",
			   sizeof ("sigchld_handler\n") - 1) < 0)
	    break; /* just ignore */
	} while (0);
    }

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  /* Restore errno so the interrupted code doesn't see our changes.  */
  errno = old_errno;
}
5825
0dc587d4
TBA
/* Implement the supports_non_stop target_ops method.  */

bool
linux_process_target::supports_non_stop ()
{
  return true;
}
5831
0dc587d4
TBA
/* Enable (ENABLE true) or disable async/non-stop event handling by
   registering or removing the event pipe with the event loop.
   Returns the previous async state.  SIGCHLD is blocked around the
   transition so no child-status signal is lost while the pipe and
   handler are being (un)installed.  */

bool
linux_process_target::async (bool enable)
{
  bool previous = target_is_async_p ();

  threads_debug_printf ("async (%d), previous=%d",
			enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      gdb_sigmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (!linux_event_pipe.open_pipe ())
	    {
	      /* Unblock before bailing out so SIGCHLD delivery
		 resumes even on failure.  */
	      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe.event_fd (),
			    handle_target_event, NULL,
			    "linux-low");

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe.event_fd ());

	  linux_event_pipe.close_pipe ();
	}

      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
5878
0dc587d4
TBA
/* Switch non-stop mode on or off; returns 0 on success, -1 if the
   async state could not be changed accordingly.  */

int
linux_process_target::start_non_stop (bool nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  target_async (nonstop);

  if (target_is_async_p () != (nonstop != false))
    return -1;

  return 0;
}
5890
652aef77
TBA
/* Implement the supports_multi_process target_ops method.  */

bool
linux_process_target::supports_multi_process ()
{
  return true;
}
5896
89245bc0
DB
/* Check if fork events are supported.  */

bool
linux_process_target::supports_fork_events ()
{
  return true;
}
5904
/* Check if vfork events are supported.  */

bool
linux_process_target::supports_vfork_events ()
{
  return true;
}
5912
393a6b59
PA
/* Return the set of supported thread options: clone and exit event
   reporting.  */

gdb_thread_options
linux_process_target::supported_thread_options ()
{
  return GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
}
5920
94585166
DB
/* Check if exec events are supported.  */

bool
linux_process_target::supports_exec_events ()
{
  return true;
}
5928
de0d863e
DB
/* Target hook for 'handle_new_gdb_connection'.  Causes a reset of the
   ptrace flags for all inferiors.  This is in case the new GDB connection
   doesn't support the same set of events that the previous one did.  */

void
linux_process_target::handle_new_gdb_connection ()
{
  /* Request that all the lwps reset their ptrace options.  */
  for_each_thread ([] (thread_info *thread)
    {
      struct lwp_info *lwp = get_thread_lwp (thread);

      if (!lwp->stopped)
	{
	  /* Stop the lwp so we can modify its ptrace options.  The
	     flag is presumably acted on when the resulting stop is
	     processed — TODO confirm against the stop handler.  */
	  lwp->must_set_ptrace_flags = 1;
	  linux_stop_lwp (lwp);
	}
      else
	{
	  /* Already stopped; go ahead and set the ptrace options.  */
	  struct process_info *proc = find_process_pid (pid_of (thread));
	  int options = linux_low_ptrace_options (proc->attached);

	  linux_enable_event_reporting (lwpid_of (thread), options);
	  lwp->must_set_ptrace_flags = 0;
	}
    });
}
5958
55cf3021
TBA
/* Handle a 'monitor' command from GDB; delegated to thread_db when
   available, otherwise unhandled (0).  */

int
linux_process_target::handle_monitor_command (char *mon)
{
#ifdef USE_THREAD_DB
  return thread_db_handle_monitor_command (mon);
#else
  return 0;
#endif
}
5968
95a45fc1
TBA
/* Return the CPU core PTID last ran on (shared Linux
   implementation).  */

int
linux_process_target::core_of_thread (ptid_t ptid)
{
  return linux_common_core_of_thread (ptid);
}
5974
c756403b
TBA
/* Implement the supports_disable_randomization target_ops method.  */

bool
linux_process_target::supports_disable_randomization ()
{
  return true;
}
efcbbd14 5980
c0245cb9
TBA
/* Implement the supports_agent target_ops method.  */

bool
linux_process_target::supports_agent ()
{
  return true;
}
5986
2526e0cd
TBA
/* Range stepping is supported when single-stepping is done in
   software, or when the low target reports hardware support.  */

bool
linux_process_target::supports_range_stepping ()
{
  if (supports_software_single_step ())
    return true;

  return low_supports_range_stepping ();
}
5995
/* Default low-target hook: no hardware range stepping.  */

bool
linux_process_target::low_supports_range_stepping ()
{
  return false;
}
6001
8247b823
TBA
/* Implement the supports_pid_to_exec_file target_ops method.  */

bool
linux_process_target::supports_pid_to_exec_file ()
{
  return true;
}
6007
/* Return the filename of the executable running in process PID, via
   /proc.  */

const char *
linux_process_target::pid_to_exec_file (int pid)
{
  return linux_proc_pid_to_exec_file (pid);
}
6013
c9b7b804
TBA
/* Implement the supports_multifs target_ops method.  */

bool
linux_process_target::supports_multifs ()
{
  return true;
}
6019
/* Open FILENAME in the mount namespace of process PID.  */

int
linux_process_target::multifs_open (int pid, const char *filename,
				    int flags, mode_t mode)
{
  return linux_mntns_open_cloexec (pid, filename, flags, mode);
}
6026
/* Unlink FILENAME in the mount namespace of process PID.  */

int
linux_process_target::multifs_unlink (int pid, const char *filename)
{
  return linux_mntns_unlink (pid, filename);
}
6032
/* Read the target of symlink FILENAME in the mount namespace of
   process PID.  */

ssize_t
linux_process_target::multifs_readlink (int pid, const char *filename,
					char *buf, size_t bufsiz)
{
  return linux_mntns_readlink (pid, filename, buf, bufsiz);
}
6039
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
/* Layout of the loadmap data returned by the kernel for DSBT/FDPIC
   targets; mirrors the corresponding kernel/uClibc structures.  */
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PT_GETDSBT
# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PTRACE_GETFDPIC
# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
# endif
78d85199 6081
9da41fda
TBA
/* Implement the supports_read_loadmap target_ops method.  */

bool
linux_process_target::supports_read_loadmap ()
{
  return true;
}
6087
6088int
6089linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6090 unsigned char *myaddr, unsigned int len)
78d85199 6091{
0bfdf32f 6092 int pid = lwpid_of (current_thread);
78d85199
YQ
6093 int addr = -1;
6094 struct target_loadmap *data = NULL;
6095 unsigned int actual_length, copy_length;
6096
6097 if (strcmp (annex, "exec") == 0)
723b724b 6098 addr = (int) LINUX_LOADMAP_EXEC;
78d85199 6099 else if (strcmp (annex, "interp") == 0)
723b724b 6100 addr = (int) LINUX_LOADMAP_INTERP;
78d85199
YQ
6101 else
6102 return -1;
6103
723b724b 6104 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
78d85199
YQ
6105 return -1;
6106
6107 if (data == NULL)
6108 return -1;
6109
6110 actual_length = sizeof (struct target_loadmap)
6111 + sizeof (struct target_loadseg) * data->nsegs;
6112
6113 if (offset < 0 || offset > actual_length)
6114 return -1;
6115
6116 copy_length = actual_length - offset < len ? actual_length - offset : len;
6117 memcpy (myaddr, (char *) data + offset, copy_length);
6118 return copy_length;
6119}
723b724b 6120#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6121
bc8d3ae4
TBA
/* Syscall catchpoints are available when the low target supports
   them.  */

bool
linux_process_target::supports_catch_syscall ()
{
  return low_supports_catch_syscall ();
}
6127
9eedd27d
TBA
/* Default low-target hook: no syscall catchpoint support.  */

bool
linux_process_target::low_supports_catch_syscall ()
{
  return false;
}
6133
770d8f6a
TBA
/* Return the current PC of REGCACHE, or 0 when the low target
   doesn't expose PC access (no breakpoint support).  */

CORE_ADDR
linux_process_target::read_pc (regcache *regcache)
{
  if (!low_supports_breakpoints ())
    return 0;

  return low_get_pc (regcache);
}
6142
770d8f6a
TBA
/* Set the PC of REGCACHE to PC; only valid when the low target
   supports breakpoints (and hence PC access).  */

void
linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (low_supports_breakpoints ());

  low_set_pc (regcache, pc);
}
6150
68119632
TBA
/* Implement the supports_thread_stopped target_ops method.  */

bool
linux_process_target::supports_thread_stopped ()
{
  return true;
}
6156
/* Report whether THREAD's LWP is currently stopped.  */

bool
linux_process_target::thread_stopped (thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}
6162
ef980d65
PA
6163bool
6164linux_process_target::any_resumed ()
6165{
6166 bool any_resumed;
6167
6168 auto status_pending_p_any = [&] (thread_info *thread)
6169 {
6170 return status_pending_p_callback (thread, minus_one_ptid);
6171 };
6172
6173 auto not_stopped = [&] (thread_info *thread)
6174 {
6175 return not_stopped_callback (thread, minus_one_ptid);
6176 };
6177
6178 /* Find a resumed LWP, if any. */
6179 if (find_thread (status_pending_p_any) != NULL)
6180 any_resumed = 1;
6181 else if (find_thread (not_stopped) != NULL)
6182 any_resumed = 1;
6183 else
6184 any_resumed = 0;
6185
6186 return any_resumed;
6187}
6188
8336d594
PA
/* This exposes stop-all-threads functionality to other modules.  */

void
linux_process_target::pause_all (bool freeze)
{
  stop_all_lwps (freeze, NULL);
}
6196
/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  */

void
linux_process_target::unpause_all (bool unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}
6205
2268b414
JK
/* Extract &phdr and num_phdr in the inferior.  Return 0 on success.
   Scans /proc/PID/auxv for the AT_PHDR and AT_PHNUM entries, using
   the 32- or 64-bit auxv record layout according to IS_ELF64.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  *phdr_memaddr = 0;
  *num_phdr = 0;
  /* Stop as soon as both entries have been seen.  */
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
6271
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.
   Walks the inferior's program headers: first computes the load
   relocation from PT_PHDR, then returns the relocated PT_DYNAMIC
   address.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
6345
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.
   Scans the inferior's dynamic section entry by entry.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  /* DT_MIPS_RLD_MAP holds the absolute address of a pointer
	     to the r_debug structure.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  /* DT_MIPS_RLD_MAP_REL is relative to the dynamic entry's
	     own address.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
6447
/* Read one pointer from MEMADDR in the inferior.  PTR_SIZE is the
   inferior's pointer size.  Returns 0 on success (like
   linux_read_memory).  */

static int
read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
{
  int ret;

  /* Go through a union so this works on either big or little endian
     hosts, when the inferior's pointer size is smaller than the size
     of CORE_ADDR.  It is assumed the inferior's endianness is the
     same of the superior's.  */
  union
  {
    CORE_ADDR core_addr;
    unsigned int ui;
    unsigned char uc;
  } addr;

  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
  if (ret == 0)
    {
      if (ptr_size == sizeof (CORE_ADDR))
	*ptr = addr.core_addr;
      else if (ptr_size == sizeof (unsigned int))
	*ptr = addr.ui;
      else
	gdb_assert_not_reached ("unhandled pointer size");
    }
  return ret;
}
6478
974387bb
TBA
/* Implement the supports_qxfer_libraries_svr4 target_ops method.  */

bool
linux_process_target::supports_qxfer_libraries_svr4 ()
{
  return true;
}
6484
2268b414
JK
/* Field offsets (in bytes) within the inferior's r_debug and
   link_map structures, for a given pointer size.  */
struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset of r_debug_extended.r_next.  */
    int r_next_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6511
8d56636a
MM
/* link_map_offsets for a 32-bit (ILP32) inferior.  */
static const link_map_offsets lmo_32bit_offsets =
  {
    0,     /* r_version offset.  */
    4,     /* r_debug.r_map offset.  */
    20,    /* r_debug_extended.r_next.  */
    0,     /* l_addr offset in link_map.  */
    4,     /* l_name offset in link_map.  */
    8,     /* l_ld offset in link_map.  */
    12,    /* l_next offset in link_map.  */
    16     /* l_prev offset in link_map.  */
  };

/* link_map_offsets for a 64-bit (LP64) inferior.  */
static const link_map_offsets lmo_64bit_offsets =
  {
    0,     /* r_version offset.  */
    8,     /* r_debug.r_map offset.  */
    40,    /* r_debug_extended.r_next.  */
    0,     /* l_addr offset in link_map.  */
    8,     /* l_name offset in link_map.  */
    16,    /* l_ld offset in link_map.  */
    24,    /* l_next offset in link_map.  */
    32     /* l_prev offset in link_map.  */
  };
6535
/* Get the loaded shared libraries from one namespace.  Walks the
   link_map chain starting at LM_ADDR (LM_PREV is the expected
   previous entry, used as a consistency check), appending one
   <library> XML element per entry to DOCUMENT.  LMID identifies the
   namespace; PTR_SIZE and LMO describe the inferior's layout.  */

static void
read_link_map (std::string &document, CORE_ADDR lmid, CORE_ADDR lm_addr,
	       CORE_ADDR lm_prev, int ptr_size, const link_map_offsets *lmo)
{
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;

  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      /* A back-pointer mismatch means the list is corrupted; stop.  */
      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%s != 0x%s",
		   paddress (lm_prev), paddress (l_prev));
	  break;
	}

      /* Not checking for error because reading may stop before we've got
	 PATH_MAX worth of characters.  */
      libname[0] = '\0';
      linux_read_memory (l_name, libname, sizeof (libname) - 1);
      libname[sizeof (libname) - 1] = '\0';
      if (libname[0] != '\0')
	{
	  string_appendf (document, "<library name=\"");
	  xml_escape_text_append (document, (char *) libname);
	  string_appendf (document, "\" lm=\"0x%s\" l_addr=\"0x%s\" "
			  "l_ld=\"0x%s\" lmid=\"0x%s\"/>",
			  paddress (lm_addr), paddress (l_addr),
			  paddress (l_ld), paddress (lmid));
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }
}
6584
/* Construct qXfer:libraries-svr4:read reply.

   ANNEX may carry "lmid=", "start=" and "prev=" address arguments
   (semicolon-separated hex values); unknown arguments are skipped.
   Returns the number of bytes copied into READBUF, -1 on error, or -2
   if WRITEBUF is non-NULL (writing is not supported).  */

int
linux_process_target::qxfer_libraries_svr4 (const char *annex,
					    unsigned char *readbuf,
					    unsigned const char *writebuf,
					    CORE_ADDR offset, int len)
{
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;
  unsigned int machine;
  CORE_ADDR lmid = 0, lm_addr = 0, lm_prev = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Determine the inferior's word size from its executable, so we use
     the right link_map layout and pointer size.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  const link_map_offsets *lmo;
  int ptr_size;
  if (is_elf64)
    {
      lmo = &lmo_64bit_offsets;
      ptr_size = 8;
    }
  else
    {
      lmo = &lmo_32bit_offsets;
      ptr_size = 4;
    }

  /* Parse the "name=value;" arguments out of the annex, filling in
     LMID, LM_ADDR and LM_PREV; unrecognized names are skipped.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int name_len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      name_len = sep - annex;
      if (name_len == 4 && startswith (annex, "lmid"))
	addrp = &lmid;
      else if (name_len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (name_len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  /* Unknown argument: skip past the next semicolon.  */
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  /* Note: intentionally left unterminated here; ">" (and possibly a
     main-lm attribute) is appended below depending on which path runs.  */
  std::string document = "<library-list-svr4 version=\"1.0\"";

  /* When the starting LM_ADDR is passed in the annex, only traverse that
     namespace, which is assumed to be identified by LMID.

     Otherwise, start with R_DEBUG and traverse all namespaces we find.  */
  if (lm_addr != 0)
    {
      document += ">";
      read_link_map (document, lmid, lm_addr, lm_prev, ptr_size, lmo);
    }
  else
    {
      if (lm_prev != 0)
	warning ("ignoring prev=0x%s without start", paddress (lm_prev));

      /* We could interpret LMID as 'provide only the libraries for this
	 namespace' but GDB is currently only providing lmid, start, and
	 prev, or nothing.  */
      if (lmid != 0)
	warning ("ignoring lmid=0x%s without start", paddress (lmid));

      /* Cache the r_debug address per process; looking it up requires
	 scanning the dynamic section.  */
      CORE_ADDR r_debug = priv->r_debug;
      if (r_debug == 0)
	r_debug = priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (r_debug == (CORE_ADDR) -1)
	return -1;

      /* Terminate the header if we end up with an empty list.  */
      if (r_debug == 0)
	document += ">";

      /* Iterate over all namespaces via the r_debug_extended r_next
	 chain (r_version >= 2).  */
      while (r_debug != 0)
	{
	  int r_version = 0;
	  if (linux_read_memory (r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0)
	    {
	      warning ("unable to read r_version from 0x%s",
		       paddress (r_debug + lmo->r_version_offset));
	      break;
	    }

	  if (r_version < 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	      break;
	    }

	  if (read_one_ptr (r_debug + lmo->r_map_offset, &lm_addr,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%s",
		       paddress (r_debug + lmo->r_map_offset));
	      break;
	    }

	  /* We read the entire namespace.  */
	  lm_prev = 0;

	  /* The first entry corresponds to the main executable unless the
	     dynamic loader was loaded late by a static executable.  But
	     in such case the main executable does not have PT_DYNAMIC
	     present and we would not have gotten here.  */
	  if (r_debug == priv->r_debug)
	    {
	      if (lm_addr != 0)
		string_appendf (document, " main-lm=\"0x%s\">",
				paddress (lm_addr));
	      else
		document += ">";

	      lm_prev = lm_addr;
	      if (read_one_ptr (lm_addr + lmo->l_next_offset,
				&lm_addr, ptr_size) != 0)
		{
		  warning ("unable to read l_next from 0x%s",
			   paddress (lm_addr + lmo->l_next_offset));
		  break;
		}
	    }

	  read_link_map (document, r_debug, lm_addr, lm_prev, ptr_size, lmo);

	  /* r_next only exists in r_debug_extended (version >= 2).  */
	  if (r_version < 2)
	    break;

	  if (read_one_ptr (r_debug + lmo->r_next_offset, &r_debug,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_next from 0x%s",
		       paddress (r_debug + lmo->r_next_offset));
	      break;
	    }
	}
    }

  document += "</library-list-svr4>";

  /* Return the slice [OFFSET, OFFSET + LEN) of the document, clamped
     to the document's actual length.  */
  int document_len = document.length ();
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document.data () + offset, len);

  return len;
}
6765
9accd112
MM
6766#ifdef HAVE_LINUX_BTRACE
6767
/* Branch tracing is always supported when gdbserver is built with
   HAVE_LINUX_BTRACE; actual availability is checked at enable time.  */

bool
linux_process_target::supports_btrace ()
{
  return true;
}
6773
/* Enable branch tracing for thread TP according to CONF; thin wrapper
   around linux_enable_btrace, passing the thread's ptid.  */

btrace_target_info *
linux_process_target::enable_btrace (thread_info *tp,
				     const btrace_config *conf)
{
  return linux_enable_btrace (tp->id, conf);
}
6780
969c39fb 6781/* See to_disable_btrace target method. */
9accd112 6782
79597bdd
TBA
6783int
6784linux_process_target::disable_btrace (btrace_target_info *tinfo)
969c39fb
MM
6785{
6786 enum btrace_error err;
6787
6788 err = linux_disable_btrace (tinfo);
6789 return (err == BTRACE_ERR_NONE ? 0 : -1);
6790}
6791
bc504a31 6792/* Encode an Intel Processor Trace configuration. */
b20a6524
MM
6793
6794static void
873a185b 6795linux_low_encode_pt_config (std::string *buffer,
b20a6524
MM
6796 const struct btrace_data_pt_config *config)
6797{
873a185b 6798 *buffer += "<pt-config>\n";
b20a6524
MM
6799
6800 switch (config->cpu.vendor)
6801 {
6802 case CV_INTEL:
873a185b
TT
6803 string_xml_appendf (*buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6804 "model=\"%u\" stepping=\"%u\"/>\n",
6805 config->cpu.family, config->cpu.model,
6806 config->cpu.stepping);
b20a6524
MM
6807 break;
6808
6809 default:
6810 break;
6811 }
6812
873a185b 6813 *buffer += "</pt-config>\n";
b20a6524
MM
6814}
6815
6816/* Encode a raw buffer. */
6817
6818static void
873a185b 6819linux_low_encode_raw (std::string *buffer, const gdb_byte *data,
b20a6524
MM
6820 unsigned int size)
6821{
6822 if (size == 0)
6823 return;
6824
268a13a5 6825 /* We use hex encoding - see gdbsupport/rsp-low.h. */
873a185b 6826 *buffer += "<raw>\n";
b20a6524
MM
6827
6828 while (size-- > 0)
6829 {
6830 char elem[2];
6831
6832 elem[0] = tohex ((*data >> 4) & 0xf);
6833 elem[1] = tohex (*data++ & 0xf);
6834
8b2d5ef8 6835 buffer->append (elem, 2);
b20a6524
MM
6836 }
6837
873a185b 6838 *buffer += "</raw>\n";
b20a6524
MM
6839}
6840
/* See to_read_btrace target method.

   Read branch trace data for TINFO into *BUFFER as an XML <btrace>
   document.  TYPE selects delta/new/all reads.  Returns 0 on success;
   on failure appends an "E.<message>" error string and returns -1.  */

int
linux_process_target::read_btrace (btrace_target_info *tinfo,
				   std::string *buffer,
				   enum btrace_read_type type)
{
  struct btrace_data btrace;
  enum btrace_error err;

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      /* Errors are reported in-band as "E." strings the client parses.  */
      if (err == BTRACE_ERR_OVERFLOW)
	*buffer += "E.Overflow.";
      else
	*buffer += "E.Generic Error.";

      return -1;
    }

  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      *buffer += "E.No Trace.";
      return -1;

    case BTRACE_FORMAT_BTS:
      /* BTS data is a list of (begin, end) block address pairs.  */
      *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
      *buffer += "<btrace version=\"1.0\">\n";

      for (const btrace_block &block : *btrace.variant.bts.blocks)
	string_xml_appendf (*buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			    paddress (block.begin), paddress (block.end));

      *buffer += "</btrace>\n";
      break;

    case BTRACE_FORMAT_PT:
      /* PT data is opaque to gdbserver: emit the CPU configuration
	 followed by the raw trace bytes, hex-encoded.  */
      *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
      *buffer += "<btrace version=\"1.0\">\n";
      *buffer += "<pt>\n";

      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      *buffer += "</pt>\n";
      *buffer += "</btrace>\n";
      break;

    default:
      *buffer += "E.Unsupported Trace Format.";
      return -1;
    }

  return 0;
}
f4abbc16
MM
6900
/* See to_btrace_conf target method.

   Describe the branch-trace configuration of TINFO as an XML
   <btrace-conf> document appended to *BUFFER.  Always returns 0; an
   unknown or absent configuration yields an empty document.  */

int
linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
					std::string *buffer)
{
  const struct btrace_config *conf;

  *buffer += "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n";
  *buffer += "<btrace-conf version=\"1.0\">\n";

  conf = linux_btrace_conf (tinfo);
  if (conf != NULL)
    {
      switch (conf->format)
	{
	case BTRACE_FORMAT_NONE:
	  break;

	case BTRACE_FORMAT_BTS:
	  /* BTS exposes only its ring-buffer size.  */
	  string_xml_appendf (*buffer, "<bts");
	  string_xml_appendf (*buffer, " size=\"0x%x\"", conf->bts.size);
	  string_xml_appendf (*buffer, " />\n");
	  break;

	case BTRACE_FORMAT_PT:
	  /* PT likewise exposes only its trace-buffer size.  */
	  string_xml_appendf (*buffer, "<pt");
	  string_xml_appendf (*buffer, " size=\"0x%x\"", conf->pt.size);
	  string_xml_appendf (*buffer, "/>\n");
	  break;
	}
    }

  *buffer += "</btrace-conf>\n";
  return 0;
}
9accd112
MM
6937#endif /* HAVE_LINUX_BTRACE */
6938
7b669087
GB
/* See nat/linux-nat.h.

   Return the ptid of the LWP gdbserver currently has selected.  */

ptid_t
current_lwp_ptid (void)
{
  return ptid_of (current_thread);
}
6946
7f63b89b
TBA
/* Return the name of thread THREAD as read from /proc via
   linux_proc_tid_get_name.  */

const char *
linux_process_target::thread_name (ptid_t thread)
{
  return linux_proc_tid_get_name (thread);
}
6952
#if USE_THREAD_DB
/* Retrieve the libthread_db handle for PTID into *HANDLE/*HANDLE_LEN;
   only available when built with thread_db support.  */
bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  return thread_db_thread_handle (ptid, handle, handle_len);
}
#endif
6961
7b961964
SM
6962thread_info *
6963linux_process_target::thread_pending_parent (thread_info *thread)
6964{
6965 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
6966
6967 if (parent == nullptr)
6968 return nullptr;
6969
6970 return get_lwp_thread (parent);
6971}
6972
df5ad102 6973thread_info *
faf44a31
PA
6974linux_process_target::thread_pending_child (thread_info *thread,
6975 target_waitkind *kind)
df5ad102 6976{
faf44a31 6977 lwp_info *child = get_thread_lwp (thread)->pending_child (kind);
df5ad102
SM
6978
6979 if (child == nullptr)
6980 return nullptr;
6981
6982 return get_lwp_thread (child);
6983}
6984
276d4552
YQ
6985/* Default implementation of linux_target_ops method "set_pc" for
6986 32-bit pc register which is literally named "pc". */
6987
6988void
6989linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
6990{
6991 uint32_t newpc = pc;
6992
6993 supply_register_by_name (regcache, "pc", &newpc);
6994}
6995
6996/* Default implementation of linux_target_ops method "get_pc" for
6997 32-bit pc register which is literally named "pc". */
6998
6999CORE_ADDR
7000linux_get_pc_32bit (struct regcache *regcache)
7001{
7002 uint32_t pc;
7003
7004 collect_register_by_name (regcache, "pc", &pc);
c058728c 7005 threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
276d4552
YQ
7006 return pc;
7007}
7008
6f69e520
YQ
7009/* Default implementation of linux_target_ops method "set_pc" for
7010 64-bit pc register which is literally named "pc". */
7011
7012void
7013linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7014{
7015 uint64_t newpc = pc;
7016
7017 supply_register_by_name (regcache, "pc", &newpc);
7018}
7019
7020/* Default implementation of linux_target_ops method "get_pc" for
7021 64-bit pc register which is literally named "pc". */
7022
7023CORE_ADDR
7024linux_get_pc_64bit (struct regcache *regcache)
7025{
7026 uint64_t pc;
7027
7028 collect_register_by_name (regcache, "pc", &pc);
c058728c 7029 threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
6f69e520
YQ
7030 return pc;
7031}
7032
0570503d 7033/* See linux-low.h. */
974c89e0 7034
0570503d 7035int
43e5fbd8 7036linux_get_auxv (int pid, int wordsize, CORE_ADDR match, CORE_ADDR *valp)
974c89e0
AH
7037{
7038 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7039 int offset = 0;
7040
7041 gdb_assert (wordsize == 4 || wordsize == 8);
7042
43e5fbd8
TJB
7043 while (the_target->read_auxv (pid, offset, data, 2 * wordsize)
7044 == 2 * wordsize)
974c89e0
AH
7045 {
7046 if (wordsize == 4)
7047 {
0570503d 7048 uint32_t *data_p = (uint32_t *) data;
974c89e0 7049 if (data_p[0] == match)
0570503d
PFC
7050 {
7051 *valp = data_p[1];
7052 return 1;
7053 }
974c89e0
AH
7054 }
7055 else
7056 {
0570503d 7057 uint64_t *data_p = (uint64_t *) data;
974c89e0 7058 if (data_p[0] == match)
0570503d
PFC
7059 {
7060 *valp = data_p[1];
7061 return 1;
7062 }
974c89e0
AH
7063 }
7064
7065 offset += 2 * wordsize;
7066 }
7067
7068 return 0;
7069}
7070
7071/* See linux-low.h. */
7072
7073CORE_ADDR
43e5fbd8 7074linux_get_hwcap (int pid, int wordsize)
974c89e0 7075{
0570503d 7076 CORE_ADDR hwcap = 0;
43e5fbd8 7077 linux_get_auxv (pid, wordsize, AT_HWCAP, &hwcap);
0570503d 7078 return hwcap;
974c89e0
AH
7079}
7080
7081/* See linux-low.h. */
7082
7083CORE_ADDR
43e5fbd8 7084linux_get_hwcap2 (int pid, int wordsize)
974c89e0 7085{
0570503d 7086 CORE_ADDR hwcap2 = 0;
43e5fbd8 7087 linux_get_auxv (pid, wordsize, AT_HWCAP2, &hwcap2);
0570503d 7088 return hwcap2;
974c89e0 7089}
6f69e520 7090
#ifdef HAVE_LINUX_REGSETS
/* Initialize INFO by counting its regsets: the array is terminated by
   an entry with a negative size.  */
void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  while (info->regsets[count].size >= 0)
    count++;

  info->num_regsets = count;
}
#endif
7101
da6d8c04
DJ
/* One-time initialization of the Linux low-level target: install the
   target vector, emit any ptrace/proc capability warnings, set up the
   SIGCHLD handler, and run architecture-specific setup.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (the_linux_target);

  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();

  /* SA_RESTART so interrupted syscalls are resumed; the handler itself
     just flags that children changed state.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  linux_check_ptrace_features ();
}