]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdbserver/linux-low.cc
stop_all_threads: (re-)enable async before waiting for stops
[thirdparty/binutils-gdb.git] / gdbserver / linux-low.cc
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
213516ef 2 Copyright (C) 1995-2023 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
268a13a5 22#include "gdbsupport/agent.h"
de0d863e 23#include "tdesc.h"
cdc8e9b2
JB
24#include "gdbsupport/event-loop.h"
25#include "gdbsupport/event-pipe.h"
268a13a5
TT
26#include "gdbsupport/rsp-low.h"
27#include "gdbsupport/signals-state-save-restore.h"
96d7229d
LM
28#include "nat/linux-nat.h"
29#include "nat/linux-waitpid.h"
268a13a5 30#include "gdbsupport/gdb_wait.h"
5826e159 31#include "nat/gdb_ptrace.h"
125f8a3d
GB
32#include "nat/linux-ptrace.h"
33#include "nat/linux-procfs.h"
8cc73a39 34#include "nat/linux-personality.h"
da6d8c04
DJ
35#include <signal.h>
36#include <sys/ioctl.h>
37#include <fcntl.h>
0a30fbc4 38#include <unistd.h>
fd500816 39#include <sys/syscall.h>
f9387fc3 40#include <sched.h>
07e059b5
VP
41#include <ctype.h>
42#include <pwd.h>
43#include <sys/types.h>
44#include <dirent.h>
53ce3c39 45#include <sys/stat.h>
efcbbd14 46#include <sys/vfs.h>
1570b33e 47#include <sys/uio.h>
268a13a5 48#include "gdbsupport/filestuff.h"
c144c7a0 49#include "tracepoint.h"
276d4552 50#include <inttypes.h>
268a13a5 51#include "gdbsupport/common-inferior.h"
2090129c 52#include "nat/fork-inferior.h"
268a13a5 53#include "gdbsupport/environ.h"
21987b9c 54#include "gdbsupport/gdb-sigmask.h"
268a13a5 55#include "gdbsupport/scoped_restore.h"
957f3f49
DE
56#ifndef ELFMAG0
57/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
58 then ELFMAG0 will have been defined. If it didn't get included by
59 gdb_proc_service.h then including it will likely introduce a duplicate
60 definition of elf_fpregset_t. */
61#include <elf.h>
62#endif
14d2069a 63#include "nat/linux-namespaces.h"
efcbbd14 64
fd462a61
DJ
65#ifndef O_LARGEFILE
66#define O_LARGEFILE 0
67#endif
1a981360 68
69f4c9cc
AH
69#ifndef AT_HWCAP2
70#define AT_HWCAP2 26
71#endif
72
db0dfaa0
LM
73/* Some targets did not define these ptrace constants from the start,
74 so gdbserver defines them locally here. In the future, these may
75 be removed after they are added to asm/ptrace.h. */
76#if !(defined(PT_TEXT_ADDR) \
77 || defined(PT_DATA_ADDR) \
78 || defined(PT_TEXT_END_ADDR))
79#if defined(__mcoldfire__)
80/* These are still undefined in 3.10 kernels. */
81#define PT_TEXT_ADDR 49*4
82#define PT_DATA_ADDR 50*4
83#define PT_TEXT_END_ADDR 51*4
db0dfaa0
LM
84/* These are still undefined in 3.10 kernels. */
85#elif defined(__TMS320C6X__)
86#define PT_TEXT_ADDR (0x10000*4)
87#define PT_DATA_ADDR (0x10004*4)
88#define PT_TEXT_END_ADDR (0x10008*4)
89#endif
90#endif
91
5203ae1e
TBA
92#if (defined(__UCLIBC__) \
93 && defined(HAS_NOMMU) \
94 && defined(PT_TEXT_ADDR) \
95 && defined(PT_DATA_ADDR) \
96 && defined(PT_TEXT_END_ADDR))
97#define SUPPORTS_READ_OFFSETS
98#endif
99
9accd112 100#ifdef HAVE_LINUX_BTRACE
125f8a3d 101# include "nat/linux-btrace.h"
268a13a5 102# include "gdbsupport/btrace-common.h"
9accd112
MM
103#endif
104
8365dcf5
TJB
105#ifndef HAVE_ELF32_AUXV_T
106/* Copied from glibc's elf.h. */
107typedef struct
108{
109 uint32_t a_type; /* Entry type */
110 union
111 {
112 uint32_t a_val; /* Integer value */
113 /* We use to have pointer elements added here. We cannot do that,
114 though, since it does not work when using 32-bit definitions
115 on 64-bit platforms and vice versa. */
116 } a_un;
117} Elf32_auxv_t;
118#endif
119
120#ifndef HAVE_ELF64_AUXV_T
121/* Copied from glibc's elf.h. */
122typedef struct
123{
124 uint64_t a_type; /* Entry type */
125 union
126 {
127 uint64_t a_val; /* Integer value */
128 /* We use to have pointer elements added here. We cannot do that,
129 though, since it does not work when using 32-bit definitions
130 on 64-bit platforms and vice versa. */
131 } a_un;
132} Elf64_auxv_t;
133#endif
134
ded48a5e
YQ
135/* Does the current host support PTRACE_GETREGSET? */
136int have_ptrace_getregset = -1;
137
8a841a35
PA
138/* Return TRUE if THREAD is the leader thread of the process. */
139
140static bool
141is_leader (thread_info *thread)
142{
143 ptid_t ptid = ptid_of (thread);
144 return ptid.pid () == ptid.lwp ();
145}
146
48989498
PA
147/* Return true if we should report thread exit events to GDB, for
148 THR. */
149
150static bool
151report_exit_events_for (thread_info *thr)
152{
153 client_state &cs = get_client_state ();
154
155 return (cs.report_thread_events
156 || (thr->thread_options & GDB_THREAD_OPTION_EXIT) != 0);
157}
158
cff068da
GB
159/* LWP accessors. */
160
161/* See nat/linux-nat.h. */
162
163ptid_t
164ptid_of_lwp (struct lwp_info *lwp)
165{
166 return ptid_of (get_lwp_thread (lwp));
167}
168
169/* See nat/linux-nat.h. */
170
4b134ca1
GB
171void
172lwp_set_arch_private_info (struct lwp_info *lwp,
173 struct arch_lwp_info *info)
174{
175 lwp->arch_private = info;
176}
177
178/* See nat/linux-nat.h. */
179
180struct arch_lwp_info *
181lwp_arch_private_info (struct lwp_info *lwp)
182{
183 return lwp->arch_private;
184}
185
186/* See nat/linux-nat.h. */
187
cff068da
GB
188int
189lwp_is_stopped (struct lwp_info *lwp)
190{
191 return lwp->stopped;
192}
193
194/* See nat/linux-nat.h. */
195
196enum target_stop_reason
197lwp_stop_reason (struct lwp_info *lwp)
198{
199 return lwp->stop_reason;
200}
201
0e00e962
AA
202/* See nat/linux-nat.h. */
203
204int
205lwp_is_stepping (struct lwp_info *lwp)
206{
207 return lwp->stepping;
208}
209
05044653
PA
210/* A list of all unknown processes which receive stop signals. Some
211 other process will presumably claim each of these as forked
212 children momentarily. */
24a09b5f 213
05044653
PA
214struct simple_pid_list
215{
216 /* The process ID. */
217 int pid;
218
219 /* The status as reported by waitpid. */
220 int status;
221
222 /* Next in chain. */
223 struct simple_pid_list *next;
224};
05c309a8 225static struct simple_pid_list *stopped_pids;
05044653
PA
226
227/* Trivial list manipulation functions to keep track of a list of new
228 stopped processes. */
229
230static void
231add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
232{
8d749320 233 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
234
235 new_pid->pid = pid;
236 new_pid->status = status;
237 new_pid->next = *listp;
238 *listp = new_pid;
239}
240
241static int
242pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
243{
244 struct simple_pid_list **p;
245
246 for (p = listp; *p != NULL; p = &(*p)->next)
247 if ((*p)->pid == pid)
248 {
249 struct simple_pid_list *next = (*p)->next;
250
251 *statusp = (*p)->status;
252 xfree (*p);
253 *p = next;
254 return 1;
255 }
256 return 0;
257}
24a09b5f 258
bde24c0a
PA
259enum stopping_threads_kind
260 {
261 /* Not stopping threads presently. */
262 NOT_STOPPING_THREADS,
263
264 /* Stopping threads. */
265 STOPPING_THREADS,
266
267 /* Stopping and suspending threads. */
268 STOPPING_AND_SUSPENDING_THREADS
269 };
270
271/* This is set while stop_all_lwps is in effect. */
6bd434d6 272static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
273
274/* FIXME make into a target method? */
24a09b5f 275int using_threads = 1;
24a09b5f 276
fa593d66
PA
277/* True if we're presently stabilizing threads (moving them out of
278 jump pads). */
279static int stabilizing_threads;
280
f50bf8e5 281static void unsuspend_all_lwps (struct lwp_info *except);
95954743 282static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
00db26fa 283static int lwp_is_marked_dead (struct lwp_info *lwp);
d50171e4 284static int kill_lwp (unsigned long lwpid, int signo);
863d01bd 285static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
ece66d65 286static int linux_low_ptrace_options (int attached);
ced2dffb 287static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
d50171e4 288
582511be
PA
289/* When the event-loop is doing a step-over, this points at the thread
290 being stepped. */
6bd434d6 291static ptid_t step_over_bkpt;
582511be 292
bf9ae9d8
TBA
293bool
294linux_process_target::low_supports_breakpoints ()
295{
296 return false;
297}
d50171e4 298
bf9ae9d8
TBA
299CORE_ADDR
300linux_process_target::low_get_pc (regcache *regcache)
301{
302 return 0;
303}
304
305void
306linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
d50171e4 307{
bf9ae9d8 308 gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
d50171e4 309}
0d62e5e8 310
7582c77c
TBA
311std::vector<CORE_ADDR>
312linux_process_target::low_get_next_pcs (regcache *regcache)
313{
314 gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
315 "implemented");
316}
317
d4807ea2
TBA
318int
319linux_process_target::low_decr_pc_after_break ()
320{
321 return 0;
322}
323
c2d6af84
PA
324/* True if LWP is stopped in its stepping range. */
325
326static int
327lwp_in_step_range (struct lwp_info *lwp)
328{
329 CORE_ADDR pc = lwp->stop_pc;
330
331 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
332}
333
cdc8e9b2
JB
334/* The event pipe registered as a waitable file in the event loop. */
335static event_pipe linux_event_pipe;
bd99dc85
PA
336
337/* True if we're currently in async mode. */
cdc8e9b2 338#define target_is_async_p() (linux_event_pipe.is_open ())
bd99dc85 339
02fc4de7 340static void send_sigstop (struct lwp_info *lwp);
bd99dc85 341
d0722149
DE
342/* Return non-zero if HEADER is a 64-bit ELF file. */
343
344static int
214d508e 345elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
d0722149 346{
214d508e
L
347 if (header->e_ident[EI_MAG0] == ELFMAG0
348 && header->e_ident[EI_MAG1] == ELFMAG1
349 && header->e_ident[EI_MAG2] == ELFMAG2
350 && header->e_ident[EI_MAG3] == ELFMAG3)
351 {
352 *machine = header->e_machine;
353 return header->e_ident[EI_CLASS] == ELFCLASS64;
354
355 }
356 *machine = EM_NONE;
357 return -1;
d0722149
DE
358}
359
360/* Return non-zero if FILE is a 64-bit ELF file,
361 zero if the file is not a 64-bit ELF file,
362 and -1 if the file is not accessible or doesn't exist. */
363
be07f1a2 364static int
214d508e 365elf_64_file_p (const char *file, unsigned int *machine)
d0722149 366{
957f3f49 367 Elf64_Ehdr header;
d0722149
DE
368 int fd;
369
370 fd = open (file, O_RDONLY);
371 if (fd < 0)
372 return -1;
373
374 if (read (fd, &header, sizeof (header)) != sizeof (header))
375 {
376 close (fd);
377 return 0;
378 }
379 close (fd);
380
214d508e 381 return elf_64_header_p (&header, machine);
d0722149
DE
382}
383
be07f1a2
PA
384/* Accepts an integer PID; Returns true if the executable PID is
385 running is a 64-bit ELF file.. */
386
387int
214d508e 388linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 389{
d8d2a3ee 390 char file[PATH_MAX];
be07f1a2
PA
391
392 sprintf (file, "/proc/%d/exe", pid);
214d508e 393 return elf_64_file_p (file, machine);
be07f1a2
PA
394}
395
fd000fb3
TBA
396void
397linux_process_target::delete_lwp (lwp_info *lwp)
bd99dc85 398{
fa96cb38
PA
399 struct thread_info *thr = get_lwp_thread (lwp);
400
c058728c 401 threads_debug_printf ("deleting %ld", lwpid_of (thr));
fa96cb38
PA
402
403 remove_thread (thr);
466eecee 404
fd000fb3 405 low_delete_thread (lwp->arch_private);
466eecee 406
013e3554 407 delete lwp;
bd99dc85
PA
408}
409
fd000fb3
TBA
410void
411linux_process_target::low_delete_thread (arch_lwp_info *info)
412{
413 /* Default implementation should be overridden if architecture-specific
414 info is being used. */
415 gdb_assert (info == nullptr);
416}
95954743 417
421490af
PA
418/* Open the /proc/PID/mem file for PROC. */
419
420static void
421open_proc_mem_file (process_info *proc)
422{
423 gdb_assert (proc->priv->mem_fd == -1);
424
425 char filename[64];
426 xsnprintf (filename, sizeof filename, "/proc/%d/mem", proc->pid);
427
428 proc->priv->mem_fd
429 = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
430}
431
fd000fb3 432process_info *
421490af 433linux_process_target::add_linux_process_no_mem_file (int pid, int attached)
95954743
PA
434{
435 struct process_info *proc;
436
95954743 437 proc = add_process (pid, attached);
8d749320 438 proc->priv = XCNEW (struct process_info_private);
95954743 439
fd000fb3 440 proc->priv->arch_private = low_new_process ();
421490af
PA
441 proc->priv->mem_fd = -1;
442
443 return proc;
444}
445
aa5ca48f 446
421490af
PA
447process_info *
448linux_process_target::add_linux_process (int pid, int attached)
449{
450 process_info *proc = add_linux_process_no_mem_file (pid, attached);
451 open_proc_mem_file (proc);
95954743
PA
452 return proc;
453}
454
f551c8ef
SM
455void
456linux_process_target::remove_linux_process (process_info *proc)
457{
458 if (proc->priv->mem_fd >= 0)
459 close (proc->priv->mem_fd);
460
461 this->low_delete_process (proc->priv->arch_private);
462
463 xfree (proc->priv);
464 proc->priv = nullptr;
465
466 remove_process (proc);
467}
468
fd000fb3
TBA
469arch_process_info *
470linux_process_target::low_new_process ()
471{
472 return nullptr;
473}
474
475void
476linux_process_target::low_delete_process (arch_process_info *info)
477{
478 /* Default implementation must be overridden if architecture-specific
479 info exists. */
480 gdb_assert (info == nullptr);
481}
482
483void
484linux_process_target::low_new_fork (process_info *parent, process_info *child)
485{
486 /* Nop. */
487}
488
797bcff5
TBA
489void
490linux_process_target::arch_setup_thread (thread_info *thread)
94585166 491{
24583e45
TBA
492 scoped_restore_current_thread restore_thread;
493 switch_to_thread (thread);
94585166 494
797bcff5 495 low_arch_setup ();
94585166
DB
496}
497
d16f3f6c
TBA
498int
499linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
500 int wstat)
24a09b5f 501{
c12a5089 502 client_state &cs = get_client_state ();
94585166 503 struct lwp_info *event_lwp = *orig_event_lwp;
89a5711c 504 int event = linux_ptrace_get_extended_event (wstat);
de0d863e 505 struct thread_info *event_thr = get_lwp_thread (event_lwp);
24a09b5f 506
183be222 507 gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
65706a29 508
82075af2
JS
509 /* All extended events we currently use are mid-syscall. Only
510 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
511 you have to be using PTRACE_SEIZE to get that. */
512 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
513
c269dbdb
DB
514 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
515 || (event == PTRACE_EVENT_CLONE))
24a09b5f
DJ
516 {
517 unsigned long new_pid;
05044653 518 int ret, status;
24a09b5f 519
de0d863e 520 /* Get the pid of the new lwp. */
d86d4aaf 521 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
56f7af9c 522 &new_pid);
24a09b5f
DJ
523
524 /* If we haven't already seen the new PID stop, wait for it now. */
05044653 525 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
24a09b5f
DJ
526 {
527 /* The new child has a pending SIGSTOP. We can't affect it until it
528 hits the SIGSTOP, but we're already attached. */
529
97438e3f 530 ret = my_waitpid (new_pid, &status, __WALL);
24a09b5f
DJ
531
532 if (ret == -1)
533 perror_with_name ("waiting for new child");
534 else if (ret != new_pid)
535 warning ("wait returned unexpected PID %d", ret);
da5898ce 536 else if (!WIFSTOPPED (status))
24a09b5f
DJ
537 warning ("wait returned unexpected status 0x%x", status);
538 }
539
393a6b59 540 if (debug_threads)
de0d863e 541 {
393a6b59
PA
542 debug_printf ("HEW: Got %s event from LWP %ld, new child is %ld\n",
543 (event == PTRACE_EVENT_FORK ? "fork"
544 : event == PTRACE_EVENT_VFORK ? "vfork"
545 : event == PTRACE_EVENT_CLONE ? "clone"
546 : "???"),
547 ptid_of (event_thr).lwp (),
548 new_pid);
549 }
550
551 ptid_t child_ptid = (event != PTRACE_EVENT_CLONE
552 ? ptid_t (new_pid, new_pid)
553 : ptid_t (ptid_of (event_thr).pid (), new_pid));
de0d863e 554
393a6b59
PA
555 lwp_info *child_lwp = add_lwp (child_ptid);
556 gdb_assert (child_lwp != NULL);
557 child_lwp->stopped = 1;
558 if (event != PTRACE_EVENT_CLONE)
559 child_lwp->must_set_ptrace_flags = 1;
560 child_lwp->status_pending_p = 0;
de0d863e 561
393a6b59 562 thread_info *child_thr = get_lwp_thread (child_lwp);
de0d863e 563
393a6b59
PA
564 /* If we're suspending all threads, leave this one suspended
565 too. If the fork/clone parent is stepping over a breakpoint,
566 all other threads have been suspended already. Leave the
567 child suspended too. */
568 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
569 || event_lwp->bp_reinsert != 0)
570 {
571 threads_debug_printf ("leaving child suspended");
572 child_lwp->suspended = 1;
573 }
574
575 if (event_lwp->bp_reinsert != 0
576 && supports_software_single_step ()
577 && event == PTRACE_EVENT_VFORK)
578 {
579 /* If we leave single-step breakpoints there, child will
580 hit it, so uninsert single-step breakpoints from parent
581 (and child). Once vfork child is done, reinsert
582 them back to parent. */
583 uninsert_single_step_breakpoints (event_thr);
584 }
585
586 if (event != PTRACE_EVENT_CLONE)
587 {
de0d863e
DB
588 /* Add the new process to the tables and clone the breakpoint
589 lists of the parent. We need to do this even if the new process
590 will be detached, since we will need the process object and the
591 breakpoints to remove any breakpoints from memory when we
592 detach, and the client side will access registers. */
393a6b59 593 process_info *child_proc = add_linux_process (new_pid, 0);
de0d863e 594 gdb_assert (child_proc != NULL);
863d01bd 595
393a6b59 596 process_info *parent_proc = get_thread_process (event_thr);
de0d863e 597 child_proc->attached = parent_proc->attached;
2e7b624b 598
63c40ec7 599 clone_all_breakpoints (child_thr, event_thr);
de0d863e 600
51a948fd
AB
601 target_desc_up tdesc = allocate_target_description ();
602 copy_target_description (tdesc.get (), parent_proc->tdesc);
603 child_proc->tdesc = tdesc.release ();
de0d863e 604
3a8a0396 605 /* Clone arch-specific process data. */
fd000fb3 606 low_new_fork (parent_proc, child_proc);
393a6b59 607 }
3a8a0396 608
393a6b59
PA
609 /* Save fork/clone info in the parent thread. */
610 if (event == PTRACE_EVENT_FORK)
611 event_lwp->waitstatus.set_forked (child_ptid);
612 else if (event == PTRACE_EVENT_VFORK)
613 event_lwp->waitstatus.set_vforked (child_ptid);
614 else if (event == PTRACE_EVENT_CLONE
615 && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
616 event_lwp->waitstatus.set_thread_cloned (child_ptid);
617
618 if (event != PTRACE_EVENT_CLONE
619 || (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
620 {
de0d863e 621 /* The status_pending field contains bits denoting the
393a6b59
PA
622 extended event, so when the pending event is handled, the
623 handler will look at lwp->waitstatus. */
de0d863e
DB
624 event_lwp->status_pending_p = 1;
625 event_lwp->status_pending = wstat;
626
393a6b59
PA
627 /* Link the threads until the parent's event is passed on to
628 GDB. */
629 event_lwp->relative = child_lwp;
630 child_lwp->relative = event_lwp;
de0d863e
DB
631 }
632
393a6b59
PA
633 /* If the parent thread is doing step-over with single-step
634 breakpoints, the list of single-step breakpoints are cloned
635 from the parent's. Remove them from the child process.
636 In case of vfork, we'll reinsert them back once vforked
637 child is done. */
638 if (event_lwp->bp_reinsert != 0
639 && supports_software_single_step ())
640 {
641 /* The child process is forked and stopped, so it is safe
642 to access its memory without stopping all other threads
643 from other processes. */
644 delete_single_step_breakpoints (child_thr);
e27d73f6 645
393a6b59
PA
646 gdb_assert (has_single_step_breakpoints (event_thr));
647 gdb_assert (!has_single_step_breakpoints (child_thr));
648 }
bde24c0a 649
da5898ce
DJ
650 /* Normally we will get the pending SIGSTOP. But in some cases
651 we might get another signal delivered to the group first.
f21cc1a2 652 If we do get another signal, be sure not to lose it. */
20ba1ce6 653 if (WSTOPSIG (status) != SIGSTOP)
da5898ce 654 {
393a6b59
PA
655 child_lwp->stop_expected = 1;
656 child_lwp->status_pending_p = 1;
657 child_lwp->status_pending = status;
da5898ce 658 }
393a6b59 659 else if (event == PTRACE_EVENT_CLONE && cs.report_thread_events)
65706a29 660 {
393a6b59
PA
661 child_lwp->waitstatus.set_thread_created ();
662 child_lwp->status_pending_p = 1;
663 child_lwp->status_pending = status;
65706a29 664 }
de0d863e 665
393a6b59
PA
666 if (event == PTRACE_EVENT_CLONE)
667 {
a0aad537 668#ifdef USE_THREAD_DB
393a6b59 669 thread_db_notice_clone (event_thr, child_ptid);
a0aad537 670#endif
393a6b59 671 }
86299109 672
393a6b59
PA
673 if (event == PTRACE_EVENT_CLONE
674 && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) == 0)
675 {
676 threads_debug_printf
677 ("not reporting clone event from LWP %ld, new child is %ld\n",
678 ptid_of (event_thr).lwp (),
679 new_pid);
680 return 1;
681 }
682
683 /* Leave the child stopped until GDB processes the parent
684 event. */
685 child_thr->last_resume_kind = resume_stop;
686 child_thr->last_status.set_stopped (GDB_SIGNAL_0);
687
688 /* Report the event. */
689 threads_debug_printf
690 ("reporting %s event from LWP %ld, new child is %ld\n",
691 (event == PTRACE_EVENT_FORK ? "fork"
692 : event == PTRACE_EVENT_VFORK ? "vfork"
693 : event == PTRACE_EVENT_CLONE ? "clone"
694 : "???"),
695 ptid_of (event_thr).lwp (),
696 new_pid);
697 return 0;
24a09b5f 698 }
c269dbdb
DB
699 else if (event == PTRACE_EVENT_VFORK_DONE)
700 {
183be222 701 event_lwp->waitstatus.set_vfork_done ();
c269dbdb 702
7582c77c 703 if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
2e7b624b 704 {
3b9a79ef 705 reinsert_single_step_breakpoints (event_thr);
2e7b624b 706
3b9a79ef 707 gdb_assert (has_single_step_breakpoints (event_thr));
2e7b624b
YQ
708 }
709
c269dbdb
DB
710 /* Report the event. */
711 return 0;
712 }
c12a5089 713 else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
94585166
DB
714 {
715 struct process_info *proc;
f27866ba 716 std::vector<int> syscalls_to_catch;
94585166
DB
717 ptid_t event_ptid;
718 pid_t event_pid;
719
c058728c
SM
720 threads_debug_printf ("Got exec event from LWP %ld",
721 lwpid_of (event_thr));
94585166
DB
722
723 /* Get the event ptid. */
724 event_ptid = ptid_of (event_thr);
e99b03dc 725 event_pid = event_ptid.pid ();
94585166 726
82075af2 727 /* Save the syscall list from the execing process. */
94585166 728 proc = get_thread_process (event_thr);
f27866ba 729 syscalls_to_catch = std::move (proc->syscalls_to_catch);
82075af2
JS
730
731 /* Delete the execing process and all its threads. */
d16f3f6c 732 mourn (proc);
24583e45 733 switch_to_thread (nullptr);
94585166
DB
734
735 /* Create a new process/lwp/thread. */
fd000fb3 736 proc = add_linux_process (event_pid, 0);
94585166
DB
737 event_lwp = add_lwp (event_ptid);
738 event_thr = get_lwp_thread (event_lwp);
739 gdb_assert (current_thread == event_thr);
797bcff5 740 arch_setup_thread (event_thr);
94585166
DB
741
742 /* Set the event status. */
183be222
SM
743 event_lwp->waitstatus.set_execd
744 (make_unique_xstrdup
745 (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));
94585166
DB
746
747 /* Mark the exec status as pending. */
748 event_lwp->stopped = 1;
749 event_lwp->status_pending_p = 1;
750 event_lwp->status_pending = wstat;
751 event_thr->last_resume_kind = resume_continue;
183be222 752 event_thr->last_status.set_ignore ();
94585166 753
82075af2
JS
754 /* Update syscall state in the new lwp, effectively mid-syscall too. */
755 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
756
757 /* Restore the list to catch. Don't rely on the client, which is free
758 to avoid sending a new list when the architecture doesn't change.
759 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
f27866ba 760 proc->syscalls_to_catch = std::move (syscalls_to_catch);
82075af2 761
94585166
DB
762 /* Report the event. */
763 *orig_event_lwp = event_lwp;
764 return 0;
765 }
de0d863e 766
f34652de 767 internal_error (_("unknown ptrace event %d"), event);
24a09b5f
DJ
768}
769
df95181f
TBA
770CORE_ADDR
771linux_process_target::get_pc (lwp_info *lwp)
d50171e4 772{
a9deee17
PA
773 process_info *proc = get_thread_process (get_lwp_thread (lwp));
774 gdb_assert (!proc->starting_up);
d50171e4 775
bf9ae9d8 776 if (!low_supports_breakpoints ())
d50171e4
PA
777 return 0;
778
24583e45
TBA
779 scoped_restore_current_thread restore_thread;
780 switch_to_thread (get_lwp_thread (lwp));
d50171e4 781
a9deee17
PA
782 struct regcache *regcache = get_thread_regcache (current_thread, 1);
783 CORE_ADDR pc = low_get_pc (regcache);
d50171e4 784
c058728c 785 threads_debug_printf ("pc is 0x%lx", (long) pc);
d50171e4 786
d50171e4
PA
787 return pc;
788}
789
9eedd27d
TBA
790void
791linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
82075af2 792{
82075af2
JS
793 struct regcache *regcache;
794
24583e45
TBA
795 scoped_restore_current_thread restore_thread;
796 switch_to_thread (get_lwp_thread (lwp));
82075af2
JS
797
798 regcache = get_thread_regcache (current_thread, 1);
9eedd27d 799 low_get_syscall_trapinfo (regcache, sysno);
82075af2 800
c058728c 801 threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
82075af2
JS
802}
803
9eedd27d
TBA
804void
805linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
806{
807 /* By default, report an unknown system call number. */
808 *sysno = UNKNOWN_SYSCALL;
809}
810
df95181f
TBA
811bool
812linux_process_target::save_stop_reason (lwp_info *lwp)
0d62e5e8 813{
582511be
PA
814 CORE_ADDR pc;
815 CORE_ADDR sw_breakpoint_pc;
3e572f71
PA
816#if USE_SIGTRAP_SIGINFO
817 siginfo_t siginfo;
818#endif
d50171e4 819
bf9ae9d8 820 if (!low_supports_breakpoints ())
df95181f 821 return false;
0d62e5e8 822
a9deee17
PA
823 process_info *proc = get_thread_process (get_lwp_thread (lwp));
824 if (proc->starting_up)
825 {
826 /* Claim we have the stop PC so that the caller doesn't try to
827 fetch it itself. */
828 return true;
829 }
830
582511be 831 pc = get_pc (lwp);
d4807ea2 832 sw_breakpoint_pc = pc - low_decr_pc_after_break ();
d50171e4 833
582511be 834 /* breakpoint_at reads from the current thread. */
24583e45
TBA
835 scoped_restore_current_thread restore_thread;
836 switch_to_thread (get_lwp_thread (lwp));
47c0c975 837
3e572f71
PA
838#if USE_SIGTRAP_SIGINFO
839 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
840 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
841 {
842 if (siginfo.si_signo == SIGTRAP)
843 {
e7ad2f14
PA
844 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
845 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
3e572f71 846 {
e7ad2f14
PA
847 /* The si_code is ambiguous on this arch -- check debug
848 registers. */
849 if (!check_stopped_by_watchpoint (lwp))
850 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
851 }
852 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
853 {
854 /* If we determine the LWP stopped for a SW breakpoint,
855 trust it. Particularly don't check watchpoint
856 registers, because at least on s390, we'd find
857 stopped-by-watchpoint as long as there's a watchpoint
858 set. */
3e572f71 859 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
3e572f71 860 }
e7ad2f14 861 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
3e572f71 862 {
e7ad2f14
PA
863 /* This can indicate either a hardware breakpoint or
864 hardware watchpoint. Check debug registers. */
865 if (!check_stopped_by_watchpoint (lwp))
866 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
3e572f71 867 }
2bf6fb9d
PA
868 else if (siginfo.si_code == TRAP_TRACE)
869 {
e7ad2f14
PA
870 /* We may have single stepped an instruction that
871 triggered a watchpoint. In that case, on some
872 architectures (such as x86), instead of TRAP_HWBKPT,
873 si_code indicates TRAP_TRACE, and we need to check
874 the debug registers separately. */
875 if (!check_stopped_by_watchpoint (lwp))
876 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
2bf6fb9d 877 }
3e572f71
PA
878 }
879 }
880#else
582511be
PA
881 /* We may have just stepped a breakpoint instruction. E.g., in
882 non-stop mode, GDB first tells the thread A to step a range, and
883 then the user inserts a breakpoint inside the range. In that
8090aef2
PA
884 case we need to report the breakpoint PC. */
885 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
d7146cda 886 && low_breakpoint_at (sw_breakpoint_pc))
e7ad2f14
PA
887 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
888
889 if (hardware_breakpoint_inserted_here (pc))
890 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
891
892 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
893 check_stopped_by_watchpoint (lwp);
894#endif
895
896 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
582511be 897 {
c058728c
SM
898 threads_debug_printf
899 ("%s stopped by software breakpoint",
900 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
582511be
PA
901
902 /* Back up the PC if necessary. */
903 if (pc != sw_breakpoint_pc)
e7ad2f14 904 {
582511be
PA
905 struct regcache *regcache
906 = get_thread_regcache (current_thread, 1);
bf9ae9d8 907 low_set_pc (regcache, sw_breakpoint_pc);
582511be
PA
908 }
909
e7ad2f14
PA
910 /* Update this so we record the correct stop PC below. */
911 pc = sw_breakpoint_pc;
582511be 912 }
e7ad2f14 913 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
c058728c
SM
914 threads_debug_printf
915 ("%s stopped by hardware breakpoint",
916 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
e7ad2f14 917 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
c058728c
SM
918 threads_debug_printf
919 ("%s stopped by hardware watchpoint",
920 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
e7ad2f14 921 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
c058728c
SM
922 threads_debug_printf
923 ("%s stopped by trace",
924 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
e7ad2f14
PA
925
926 lwp->stop_pc = pc;
df95181f 927 return true;
0d62e5e8 928}
ce3a066d 929
fd000fb3
TBA
930lwp_info *
931linux_process_target::add_lwp (ptid_t ptid)
611cb4a5 932{
c360a473 933 lwp_info *lwp = new lwp_info;
0d62e5e8 934
754e3168
AH
935 lwp->thread = add_thread (ptid, lwp);
936
fd000fb3 937 low_new_thread (lwp);
aa5ca48f 938
54a0b537 939 return lwp;
0d62e5e8 940}
611cb4a5 941
fd000fb3
TBA
942void
943linux_process_target::low_new_thread (lwp_info *info)
944{
945 /* Nop. */
946}
947
/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  Runs in the
   forked child, before the exec: any failure reports through
   trace_start_error_with_name, which does not return.  */

static void
linux_ptrace_fun ()
{
  /* Request tracing by our parent (gdbserver).  */
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  /* Put the inferior in its own process group.  */
  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      /* open() reuses the lowest free descriptor, i.e. fd 0.  */
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
979
/* Start an inferior process and returns its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    /* Scope block: personality is restored when restore_personality
       goes out of scope, i.e. right after the fork.  */
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  /* When spawning a new process, we can't open the mem file yet.  We
     still have to nurse the process through the shell, and that execs
     a couple times.  The address space a /proc/PID/mem file is
     accessing is destroyed on exec.  */
  process_info *proc = add_linux_process_no_mem_file (pid, 0);

  ptid = ptid_t (pid, pid);
  new_lwp = add_lwp (ptid);
  /* Ptrace event-reporting options are applied lazily, on the first
     stop; see post_create_inferior.  */
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  /* PROC is now past the shell running the program we want, so we can
     open the /proc/PID/mem file.  */
  open_proc_mem_file (proc);

  return pid;
}
1022
ece66d65
JS
1023/* Implement the post_create_inferior target_ops method. */
1024
6dee9afb
TBA
1025void
1026linux_process_target::post_create_inferior ()
ece66d65
JS
1027{
1028 struct lwp_info *lwp = get_thread_lwp (current_thread);
1029
797bcff5 1030 low_arch_setup ();
ece66d65
JS
1031
1032 if (lwp->must_set_ptrace_flags)
1033 {
1034 struct process_info *proc = current_process ();
1035 int options = linux_low_ptrace_options (proc->attached);
1036
1037 linux_enable_event_reporting (lwpid_of (current_thread), options);
1038 lwp->must_set_ptrace_flags = 0;
1039 }
1040}
1041
/* Attach to the LWP specified by PTID and register it in the LWP
   list.  Returns 0 on success, or the errno value from PTRACE_ATTACH
   on failure.  Does not wait for the resulting SIGSTOP here; see the
   case analysis below for who collects it.  */

int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      threads_debug_printf ("Attached to a stopped process");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
1122
/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      threads_debug_printf ("Found new lwp %d", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	threads_debug_printf
	  ("Cannot attach to lwp %d: thread is gone (%d: %s)",
	   lwpid, err, safe_strerror (err));
      else if (err != 0)
	{
	  /* Attach failed for some other reason; warn but keep
	     iterating over the remaining threads.  */
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      return 1;
    }
  return 0;
}
1161
500c1d85
PA
1162static void async_file_mark (void);
1163
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  Returns 0 on success; raises an error if the
   initial PTRACE_ATTACH fails.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid);
  int err;

  /* Delay opening the /proc/PID/mem file until we've successfully
     attached.  */
  proc = add_linux_process_no_mem_file (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      /* Undo the process registration before reporting failure.  */
      this->remove_linux_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  open_proc_mem_file (proc);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid));
  gdb_assert (initial_thread != nullptr);
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));
      gdb_assert (lwp != nullptr);

      /* Anything other than the expected SIGSTOP must be reported to
	 GDB later, so record it as pending.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
1242
95954743 1243static int
e4eb0dec 1244last_thread_of_process_p (int pid)
95954743 1245{
e4eb0dec 1246 bool seen_one = false;
95954743 1247
da4ae14a 1248 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
95954743 1249 {
e4eb0dec
SM
1250 if (!seen_one)
1251 {
1252 /* This is the first thread of this process we see. */
1253 seen_one = true;
1254 return false;
1255 }
1256 else
1257 {
1258 /* This is the second thread of this process we see. */
1259 return true;
1260 }
1261 });
da6d8c04 1262
e4eb0dec 1263 return thread == NULL;
95954743
PA
1264}
1265
/* Kill LWP.  Sends SIGKILL followed by PTRACE_KILL; see the comment
   below for why both are used.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      /* Save errno before the debug call can clobber it.  */
      int save_errno = errno;

      threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }
}
1309
/* Kill LWP and wait for it to die, reaping its wait status so the
   kernel can release the task.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1351
/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader (which must be killed last; see below).  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      threads_debug_printf ("is last of process %s",
			    target_pid_to_str (thread->id).c_str ());
      return;
    }

  kill_wait_lwp (lwp);
}
1374
/* Implement the kill target_ops method: kill every LWP of PROCESS,
   leader last, then mourn it.  Always returns 0.  */

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    threads_debug_printf ("cannot find lwp for pid: %d", pid);
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1405
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   the host signal number, or 0 if nothing should be delivered.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
	  || thread->last_status.sig () == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      threads_debug_printf ("lwp %s had stopped with extended "
			    "status: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      /* GDB explicitly asked that this signal not be passed.  */
      threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      threads_debug_printf ("lwp %s had signal %s, "
			    "but we don't know if we should pass it.  "
			    "Default to not.",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      threads_debug_printf ("lwp %s has pending signal %s: delivering it",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
1481
/* Detach from a single LWP: cancel any pending SIGSTOP, compute the
   signal to deliver on detach, flush register state, and
   PTRACE_DETACH.  If the LWP turned zombie meanwhile (ESRCH), reap it
   instead.  Deletes the lwp_info on the way out.  */

void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      threads_debug_printf ("Sending SIGCONT to %s",
			    target_pid_to_str (ptid_of (thread)).c_str ());

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)).c_str (),
		 safe_strerror (save_errno));
	}
    }
  else
    threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
			  target_pid_to_str (ptid_of (thread)).c_str (),
			  strsignal (sig));

  delete_lwp (lwp);
}
1560
/* Implement the detach target_ops method: detach from every LWP of
   PROCESS (clones first, leader last) and mourn it.  Always returns
   0.  */

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  gdb_assert (main_lwp != nullptr);
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1610
/* Implement the mourn target_ops method: remove all LWPs that belong
   to PROCESS from the lwp list, and deregister the process itself.  */

void
linux_process_target::mourn (process_info *process)
{
#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  this->remove_linux_process (process);
}
1627
95a49a39
TBA
1628void
1629linux_process_target::join (int pid)
444d6139 1630{
444d6139
PA
1631 int status, ret;
1632
1633 do {
d105de22 1634 ret = my_waitpid (pid, &status, 0);
444d6139
PA
1635 if (WIFEXITED (status) || WIFSIGNALED (status))
1636 break;
1637 } while (ret != -1 || errno != ECHILD);
1638}
1639
13d3d99b
TBA
1640/* Return true if the given thread is still alive. */
1641
1642bool
1643linux_process_target::thread_alive (ptid_t ptid)
0d62e5e8 1644{
95954743
PA
1645 struct lwp_info *lwp = find_lwp_pid (ptid);
1646
1647 /* We assume we always know if a thread exits. If a whole process
1648 exited but we still haven't been able to report it to GDB, we'll
1649 hold on to the last lwp of the dead process. */
1650 if (lwp != NULL)
00db26fa 1651 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1652 else
1653 return 0;
1654}
1655
/* Return true if THREAD's pending status is still worth reporting to
   GDB.  A pending SW/HW-breakpoint stop is discarded if the thread's
   PC has since moved or (without SIGTRAP siginfo support) the
   breakpoint at the stop PC was removed.  */

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      /* Breakpoint lookups below operate on the current thread.  */
      scoped_restore_current_thread restore_thread;
      switch_to_thread (thread);

      if (pc != lp->stop_pc)
	{
	  threads_debug_printf ("PC of %ld changed",
				lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      /* Without siginfo we can't trust the recorded stop reason;
	 re-check that the breakpoint is still inserted.  */
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !low_breakpoint_at (pc))
	{
	  threads_debug_printf ("previous SW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  threads_debug_printf ("previous HW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
#endif

      if (discard)
	{
	  threads_debug_printf ("discarding pending breakpoint status");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1712
a681f9c9
PA
1713/* Returns true if LWP is resumed from the client's perspective. */
1714
1715static int
1716lwp_resumed (struct lwp_info *lwp)
1717{
1718 struct thread_info *thread = get_lwp_thread (lwp);
1719
1720 if (thread->last_resume_kind != resume_stop)
1721 return 1;
1722
1723 /* Did gdb send us a `vCont;t', but we haven't reported the
1724 corresponding stop to gdb yet? If so, the thread is still
1725 resumed/running from gdb's perspective. */
1726 if (thread->last_resume_kind == resume_stop
183be222 1727 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
a681f9c9
PA
1728 return 1;
1729
1730 return 0;
1731}
1732
/* Predicate used when searching for a thread with a reportable event:
   returns whether THREAD (matching PTID) has a pending status worth
   reporting.  Stale breakpoint stops are resumed and dropped.  */

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
						 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  /* Threads GDB considers stopped have nothing new to report.  */
  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      /* The pending stop went stale; quietly re-resume the thread the
	 way it was previously resumed.  */
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
1756
95954743
PA
1757struct lwp_info *
1758find_lwp_pid (ptid_t ptid)
1759{
d4895ba2
SM
1760 long lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1761 thread_info *thread = find_thread ([lwp] (thread_info *thr_arg)
454296a2 1762 {
da4ae14a 1763 return thr_arg->id.lwp () == lwp;
454296a2 1764 });
d86d4aaf
DE
1765
1766 if (thread == NULL)
1767 return NULL;
1768
9c80ecd6 1769 return get_thread_lwp (thread);
95954743
PA
1770}
1771
fa96cb38 1772/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1773
fa96cb38
PA
1774static int
1775num_lwps (int pid)
1776{
fa96cb38 1777 int count = 0;
0d62e5e8 1778
4d3bb80e
SM
1779 for_each_thread (pid, [&] (thread_info *thread)
1780 {
9c80ecd6 1781 count++;
4d3bb80e 1782 });
3aee8918 1783
fa96cb38
PA
1784 return count;
1785}
d61ddec4 1786
6d4ee8c6
GB
1787/* See nat/linux-nat.h. */
1788
1789struct lwp_info *
1790iterate_over_lwps (ptid_t filter,
d3a70e03 1791 gdb::function_view<iterate_over_lwps_ftype> callback)
6d4ee8c6 1792{
da4ae14a 1793 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
6d1e5673 1794 {
da4ae14a 1795 lwp_info *lwp = get_thread_lwp (thr_arg);
6d1e5673 1796
d3a70e03 1797 return callback (lwp);
6d1e5673 1798 });
6d4ee8c6 1799
9c80ecd6 1800 if (thread == NULL)
6d4ee8c6
GB
1801 return NULL;
1802
9c80ecd6 1803 return get_thread_lwp (thread);
6d4ee8c6
GB
1804}
1805
/* Delete the lwp_info of any thread-group leader that has turned
   zombie while other threads of its group are still alive; see the
   long case analysis below for why this is done eagerly.  */

void
linux_process_target::check_zombie_leaders ()
{
  for_each_process ([this] (process_info *proc)
    {
      pid_t leader_pid = pid_of (proc);
      lwp_info *leader_lp = find_lwp_pid (ptid_t (leader_pid));

      threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
			    "num_lwps=%d, zombie=%d",
			    leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
			    linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL && !leader_lp->stopped
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  Note this
	     isn't a watertight check.  If the inferior is
	     multi-threaded and is exiting, it may be we see the
	     leader as zombie before we reap all the non-leader
	     threads.  See comments below.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A zombie leader in a multi-threaded program can mean one
	     of three things:

	     #1 - Only the leader exited, not the whole program, e.g.,
	     with pthread_exit.  Since we can't reap the leader's exit
	     status until all other threads are gone and reaped too,
	     we want to delete the zombie leader right away, as it
	     can't be debugged, we can't read its registers, etc.
	     This is the main reason we check for zombie leaders
	     disappearing.

	     #2 - The whole thread-group/process exited (a group exit,
	     via e.g. exit(3), and there is (or will be shortly) an
	     exit reported for each thread in the process, and then
	     finally an exit for the leader once the non-leaders are
	     reaped.

	     #3 - There are 3 or more threads in the group, and a
	     thread other than the leader exec'd.  See comments on
	     exec events at the top of the file.

	     Ideally we would never delete the leader for case #2.
	     Instead, we want to collect the exit status of each
	     non-leader thread, and then finally collect the exit
	     status of the leader as normal and use its exit code as
	     whole-process exit code.  Unfortunately, there's no
	     race-free way to distinguish cases #1 and #2.  We can't
	     assume the exit events for the non-leaders threads are
	     already pending in the kernel, nor can we assume the
	     non-leader threads are in zombie state already.  Between
	     the leader becoming zombie and the non-leaders exiting
	     and becoming zombie themselves, there's a small time
	     window, so such a check would be racy.  Temporarily
	     pausing all threads and checking to see if all threads
	     exit or not before re-resuming them would work in the
	     case that all threads are running right now, but it
	     wouldn't work if some thread is currently already
	     ptrace-stopped, e.g., due to scheduler-locking.

	     So what we do is we delete the leader anyhow, and then
	     later on when we see its exit status, we re-add it back.
	     We also make sure that we only report a whole-process
	     exit when we see the leader exiting, as opposed to when
	     the last LWP in the LWP list exits, which can be a
	     non-leader if we deleted the leader here.  */
	  threads_debug_printf ("Thread group leader %d zombie "
				"(it exited, or another thread execd), "
				"deleting it.",
				leader_pid);
	  delete_lwp (leader_lp);
	}
    });
}
c3adc08c 1882
a1385b7b
SM
1883/* Callback for `find_thread'. Returns the first LWP that is not
1884 stopped. */
d50171e4 1885
a1385b7b
SM
1886static bool
1887not_stopped_callback (thread_info *thread, ptid_t filter)
fa96cb38 1888{
a1385b7b
SM
1889 if (!thread->id.matches (filter))
1890 return false;
47c0c975 1891
a1385b7b 1892 lwp_info *lwp = get_thread_lwp (thread);
fa96cb38 1893
a1385b7b 1894 return !lwp->stopped;
0d62e5e8 1895}
611cb4a5 1896
863d01bd
PA
1897/* Increment LWP's suspend count. */
1898
1899static void
1900lwp_suspended_inc (struct lwp_info *lwp)
1901{
1902 lwp->suspended++;
1903
c058728c
SM
1904 if (lwp->suspended > 4)
1905 threads_debug_printf
1906 ("LWP %ld has a suspiciously high suspend count, suspended=%d",
1907 lwpid_of (get_lwp_thread (lwp)), lwp->suspended);
863d01bd
PA
1908}
1909
1910/* Decrement LWP's suspend count. */
1911
1912static void
1913lwp_suspended_decr (struct lwp_info *lwp)
1914{
1915 lwp->suspended--;
1916
1917 if (lwp->suspended < 0)
1918 {
1919 struct thread_info *thread = get_lwp_thread (lwp);
1920
f34652de 1921 internal_error ("unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
863d01bd
PA
1922 lwp->suspended);
1923 }
1924}
1925
219f2f23
PA
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* The LWP must not be suspended on entry; we suspend it ourselves
     below and assert balance again on exit.  */
  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  /* Undo the temporary suspension from above.  */
  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads
	      || (lwp->collecting_fast_tracepoint
		  != fast_tpoint_collect_result::not_collecting));

  if (tpoint_related_event)
    {
      threads_debug_printf ("got a tracepoint event");
      return 1;
    }

  return 0;
}
1974
13e567af
TBA
1975fast_tpoint_collect_result
1976linux_process_target::linux_fast_tracepoint_collecting
1977 (lwp_info *lwp, fast_tpoint_collect_status *status)
fa593d66
PA
1978{
1979 CORE_ADDR thread_area;
d86d4aaf 1980 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66 1981
fa593d66
PA
1982 /* Get the thread area address. This is used to recognize which
1983 thread is which when tracing with the in-process agent library.
1984 We don't read anything from the address, and treat it as opaque;
1985 it's the address itself that we assume is unique per-thread. */
13e567af 1986 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
229d26fc 1987 return fast_tpoint_collect_result::not_collecting;
fa593d66
PA
1988
1989 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1990}
1991
13e567af
TBA
1992int
1993linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
1994{
1995 return -1;
1996}
1997
d16f3f6c
TBA
/* Check whether LWP, stopped with wait status *WSTAT (or NULL if no
   status is available), is in the middle of a fast tracepoint
   collection and needs to be moved out of the jump pad before the
   stop can be reported.  Returns true if the LWP must keep running
   until it leaves the jump pad, false if the stop can be reported
   as-is.  May reset the LWP's PC and siginfo when a synchronous
   signal hit inside the relocated instruction.  */

bool
linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
{
  /* The helpers below operate on the current thread, so temporarily
     switch to LWP's thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  /* Only relevant when fast tracepoints are in use and the stop is
     not a SIGTRAP (SIGTRAP stops are breakpoint/tracepoint traps
     handled elsewhere).  */
  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;

      threads_debug_printf
	("Checking whether LWP %ld needs to move out of the jump pad.",
	 lwpid_of (current_thread));

      fast_tpoint_collect_result r
	= linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != fast_tpoint_collect_result::not_collecting)
	    {
	      if (r == fast_tpoint_collect_result::before_insn
		  && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      threads_debug_printf
		("Checking whether LWP %ld needs to move out of the jump pad..."
		 " it does", lwpid_of (current_thread));

	      return true;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint
	    = fast_tpoint_collect_result::not_collecting;

	  if (r != fast_tpoint_collect_result::not_collecting
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      /* Rewind the PC to the tracepoint address so GDB
		 reports the stop at the original location.  */
	      regcache = get_thread_regcache (current_thread, 1);
	      low_set_pc (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      threads_debug_printf
		("Cancelling fast exit-jump-pad: removing bkpt."
		 "stopping all threads momentarily.");

	      /* Removing the breakpoint must not race with other
		 threads executing through it.  */
	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  threads_debug_printf
    ("Checking whether LWP %ld needs to move out of the jump pad... no",
     lwpid_of (current_thread));

  return false;
}
2114
/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  Captures the signal's siginfo via
   PTRACE_GETSIGINFO so it can be restored when the signal is finally
   reported.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  threads_debug_printf ("Deferring signal %d for LWP %ld.",
			WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      for (const auto &sig : lwp->pending_signals_to_report)
	threads_debug_printf (" Already queued %d", sig.signal);

      threads_debug_printf (" (no more currently queued signals)");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice) */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      for (const auto &sig : lwp->pending_signals_to_report)
	{
	  if (sig.signal == WSTOPSIG (*wstat))
	    {
	      threads_debug_printf
		("Not requeuing already queued non-RT signal %d for LWP %ld",
		 sig.signal, lwpid_of (thread));
	      return;
	    }
	}
    }

  lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));

  /* Save the kernel's siginfo for this signal alongside the queue
     entry, so it can be re-injected later.  */
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &lwp->pending_signals_to_report.back ().info);
}
2156
/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  Stores the stop code in *WSTAT and re-injects
   the saved siginfo via PTRACE_SETSIGINFO.  Returns 1 if a signal was
   dequeued, 0 if the queue was empty.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (!lwp->pending_signals_to_report.empty ())
    {
      const pending_signal &p_sig = lwp->pending_signals_to_report.front ();

      *wstat = W_STOPCODE (p_sig.signal);
      /* Only restore siginfo that was actually captured (si_signo of
	 0 means there was none).  */
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals_to_report.pop_front ();

      threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
			    WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  for (const auto &sig : lwp->pending_signals_to_report)
	    threads_debug_printf (" Still queued %d", sig.signal);

	  threads_debug_printf (" (no more queued signals)");
	}

      return 1;
    }

  return 0;
}
2192
ac1bbaca
TBA
2193bool
2194linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
582511be 2195{
24583e45
TBA
2196 scoped_restore_current_thread restore_thread;
2197 switch_to_thread (get_lwp_thread (child));
d50171e4 2198
ac1bbaca
TBA
2199 if (low_stopped_by_watchpoint ())
2200 {
2201 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2202 child->stopped_data_address = low_stopped_data_address ();
2203 }
582511be 2204
ac1bbaca
TBA
2205 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2206}
d50171e4 2207
ac1bbaca
TBA
2208bool
2209linux_process_target::low_stopped_by_watchpoint ()
2210{
2211 return false;
2212}
d50171e4 2213
ac1bbaca
TBA
2214CORE_ADDR
2215linux_process_target::low_stopped_data_address ()
2216{
2217 return 0;
c4d9ceb6
YQ
2218}
2219
de0d863e
DB
2220/* Return the ptrace options that we want to try to enable. */
2221
2222static int
2223linux_low_ptrace_options (int attached)
2224{
c12a5089 2225 client_state &cs = get_client_state ();
de0d863e
DB
2226 int options = 0;
2227
2228 if (!attached)
2229 options |= PTRACE_O_EXITKILL;
2230
c12a5089 2231 if (cs.report_fork_events)
de0d863e
DB
2232 options |= PTRACE_O_TRACEFORK;
2233
c12a5089 2234 if (cs.report_vfork_events)
c269dbdb
DB
2235 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2236
c12a5089 2237 if (cs.report_exec_events)
94585166
DB
2238 options |= PTRACE_O_TRACEEXEC;
2239
82075af2
JS
2240 options |= PTRACE_O_TRACESYSGOOD;
2241
de0d863e
DB
2242 return options;
2243}
2244
/* Process one wait status WSTAT reported by waitpid for LWPID.
   Records the event on the corresponding LWP as a pending status,
   except for events that should not reach the core: delayed SIGSTOPs,
   extended events fully handled here, and exits of non-leader threads
   (unless thread-exit events were requested).  Also re-adds LWPs that
   were deleted as zombie leaders and have now reported their exec or
   exit.  */

void
linux_process_target::filter_event (int lwpid, int wstat)
{
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (ptid_t (lwpid));

  /* Check for events reported by anything not in our LWP list.  */
  if (child == nullptr)
    {
      if (WIFSTOPPED (wstat))
	{
	  if (WSTOPSIG (wstat) == SIGTRAP
	      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
	    {
	      /* A non-leader thread exec'ed after we've seen the
		 leader zombie, and removed it from our lists (in
		 check_zombie_leaders).  The non-leader thread changes
		 its tid to the tgid.  */
	      threads_debug_printf
		("Re-adding thread group leader LWP %d after exec.",
		 lwpid);

	      child = add_lwp (ptid_t (lwpid, lwpid));
	      child->stopped = 1;
	      switch_to_thread (child->thread);
	    }
	  else
	    {
	      /* A process we are controlling has forked and the new
		 child's stop was reported to us by the kernel.  Save
		 its PID and go back to waiting for the fork event to
		 be reported - the stopped process might be returned
		 from waitpid before or after the fork event is.  */
	      threads_debug_printf
		("Saving LWP %d status %s in stopped_pids list",
		 lwpid, status_to_str (wstat).c_str ());
	      add_to_pid_list (&stopped_pids, lwpid, wstat);
	    }
	}
      else
	{
	  /* Don't report an event for the exit of an LWP not in our
	     list, i.e. not part of any inferior we're debugging.
	     This can happen if we detach from a program we originally
	     forked and then it exits.  However, note that we may have
	     earlier deleted a leader of an inferior we're debugging,
	     in check_zombie_leaders.  Re-add it back here if so.  */
	  find_process ([&] (process_info *proc)
	    {
	      if (proc->pid == lwpid)
		{
		  threads_debug_printf
		    ("Re-adding thread group leader LWP %d after exit.",
		     lwpid);

		  child = add_lwp (ptid_t (lwpid, lwpid));
		  return true;
		}
	      return false;
	    });
	}

      /* Still unknown: drop the event on the floor.  */
      if (child == nullptr)
	return;
    }

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      threads_debug_printf ("%d exited", lwpid);

      if (finish_step_over (child))
	{
	  /* Unsuspend all other LWPs, and set them back running again.  */
	  unsuspend_all_lwps (child);
	}

      /* If this is not the leader LWP, then the exit signal was not
	 the end of the debugged application and should be ignored,
	 unless GDB wants to hear about thread exits.  */
      if (report_exit_events_for (thread) || is_leader (thread))
	{
	  /* Since events are serialized to GDB core, and we can't
	     report this one right now.  Leave the status pending for
	     the next time we're able to report it.  */
	  mark_lwp_dead (child, wstat);
	  return;
	}
      else
	{
	  delete_lwp (child);
	  return;
	}
    }

  /* NOTE(review): the WIFSTOPPED checks below are redundant with this
     assert, but harmless.  */
  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return;
	    }
	}
    }

  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Always update syscall_state, even if it will be filtered later.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    {
      /* Syscall stops alternate: entry, return, entry, ...  */
      child->syscall_state
	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	   ? TARGET_WAITKIND_SYSCALL_RETURN
	   : TARGET_WAITKIND_SYSCALL_ENTRY);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in handle_extended_wait.  */
      child->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Be careful to not overwrite stop_pc until save_stop_reason is
     called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (&child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return;
	}
    }

  if (linux_wstatus_maybe_breakpoint (wstat))
    {
      if (save_stop_reason (child))
	have_stop_pc = 1;
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      threads_debug_printf ("Expected stop.");

      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
				target_pid_to_str (ptid_of (thread)).c_str ());
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
				target_pid_to_str (ptid_of (thread)).c_str ());
	  return;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
				child->stepping ? "step" : "continue",
				target_pid_to_str (ptid_of (thread)).c_str ());

	  resume_one_lwp (child, child->stepping, 0, NULL);
	  return;
	}
    }

  /* Leave the event pending; the caller picks an event LWP later.  */
  child->status_pending_p = 1;
  child->status_pending = wstat;
  return;
}
2463
b31cdfa6
TBA
2464bool
2465linux_process_target::maybe_hw_step (thread_info *thread)
f79b145d 2466{
b31cdfa6
TBA
2467 if (supports_hardware_single_step ())
2468 return true;
f79b145d
YQ
2469 else
2470 {
3b9a79ef 2471 /* GDBserver must insert single-step breakpoint for software
f79b145d 2472 single step. */
3b9a79ef 2473 gdb_assert (has_single_step_breakpoints (thread));
b31cdfa6 2474 return false;
f79b145d
YQ
2475 }
2476}
2477
df95181f
TBA
2478void
2479linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
20ba1ce6 2480{
20ba1ce6
PA
2481 struct lwp_info *lp = get_thread_lwp (thread);
2482
2483 if (lp->stopped
863d01bd 2484 && !lp->suspended
20ba1ce6 2485 && !lp->status_pending_p
183be222 2486 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
20ba1ce6 2487 {
8901d193
YQ
2488 int step = 0;
2489
2490 if (thread->last_resume_kind == resume_step)
b6d8d612
KB
2491 {
2492 if (supports_software_single_step ())
2493 install_software_single_step_breakpoints (lp);
2494
2495 step = maybe_hw_step (thread);
2496 }
20ba1ce6 2497
c058728c
SM
2498 threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
2499 target_pid_to_str (ptid_of (thread)).c_str (),
2500 paddress (lp->stop_pc), step);
20ba1ce6 2501
df95181f 2502 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
20ba1ce6
PA
2503 }
2504}
2505
d16f3f6c
TBA
/* Wait for an event from any LWP matching FILTER_PTID, leaving events
   of non-matching LWPs pending.  WAIT_PTID only affects the "no
   unwaited-for children" check.  The wait status is stored in
   *WSTATP; OPTIONS are waitpid options (WNOHANG makes this
   non-blocking).  Returns the LWP id of the event LWP, 0 if WNOHANG
   was set and no event was found, or -1 if no resumed children remain
   in the set we are waiting for.  On success, the current thread is
   switched to the event thread.  */

int
linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
					       ptid_t filter_ptid,
					       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
    {
      /* Waiting for anything (or any thread of a process): pick a
	 pending event at random to avoid starvation.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  threads_debug_printf ("Got a pending child %ld", lwpid_of (event_thread));
	}
    }
  else if (filter_ptid != null_ptid)
    {
      /* Waiting for one specific LWP.  */
      requested_child = find_lwp_pid (filter_ptid);
      gdb_assert (requested_child != nullptr);

      /* If the LWP is midway through a fast tracepoint collection,
	 defer its pending signal and keep it running until it leaves
	 the jump pad.  */
      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && (requested_child->collecting_fast_tracepoint
	      != fast_tpoint_collect_result::not_collecting))
	{
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error ("requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      threads_debug_printf ("Got an event from pending child %ld (%04x)",
			    lwpid_of (event_thread),
			    event_child->status_pending);

      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      switch_to_thread (event_thread);
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
			    ret, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  threads_debug_printf ("waitpid %ld received %s",
				(long) ret, status_to_str (*wstatp).c_str ());

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_thread ([this] (thread_info *thread)
	  {
	    resume_stopped_resumed_lwps (thread);
	  });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      auto not_stopped = [&] (thread_info *thread)
	{
	  return not_stopped_callback (thread, wait_ptid);
	};

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if (find_thread (not_stopped) == NULL)
	{
	  threads_debug_printf ("exit (no unwaited-for LWP)");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  threads_debug_printf ("WNOHANG set, no event found");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      threads_debug_printf ("sigsuspend'ing");

      sigsuspend (&prev_mask);
      gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  /* Restore the original signal mask before returning.  */
  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);

  switch_to_thread (event_thread);

  return lwpid_of (event_thread);
}
2703
d16f3f6c
TBA
2704int
2705linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
fa96cb38 2706{
d16f3f6c 2707 return wait_for_event_filtered (ptid, ptid, wstatp, options);
611cb4a5
DJ
2708}
2709
6bf5e0ba
PA
/* Select one LWP out of those that have events pending.  On success,
   *ORIG_LP is updated to point at the chosen LWP; if no LWP has an
   event pending, *ORIG_LP is left unchanged.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  struct thread_info *event_thread = NULL;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it's the LWP that
     the core is most interested in.  If we didn't do this, then we'd
     have to handle pending step SIGTRAPs somehow in case the core
     later continues the previously-stepped thread, otherwise we'd
     report the pending SIGTRAP, and the core, not having stepped the
     thread, wouldn't understand what the trap was for, and therefore
     would report it to the user as a random signal.  */
  if (!non_stop)
    {
      event_thread = find_thread ([] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
		  && thread->last_resume_kind == resume_step
		  && lp->status_pending_p);
	});

      if (event_thread != NULL)
	threads_debug_printf
	  ("Select single-step %s",
	   target_pid_to_str (ptid_of (event_thread)).c_str ());
    }
  if (event_thread == NULL)
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had events.  */

      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  /* Only resumed LWPs that have an event pending.  */
	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
		  && lp->status_pending_p);
	});
    }

  if (event_thread != NULL)
    {
      struct lwp_info *event_lp = get_thread_lwp (event_thread);

      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
2764
7984d532
PA
2765/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2766 NULL. */
2767
2768static void
2769unsuspend_all_lwps (struct lwp_info *except)
2770{
139720c5
SM
2771 for_each_thread ([&] (thread_info *thread)
2772 {
2773 lwp_info *lwp = get_thread_lwp (thread);
2774
2775 if (lwp != except)
2776 lwp_suspended_decr (lwp);
2777 });
7984d532
PA
2778}
2779
5a6b0a41 2780static bool lwp_running (thread_info *thread);
fa593d66
PA
2781
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

   - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

   - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

void
linux_process_target::stabilize_threads ()
{
  /* If any thread is already irrecoverably stuck inside a jump pad,
     there is nothing useful we can do; bail out early.  */
  thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
    {
      return stuck_in_jump_pad (thread);
    });

  if (thread_stuck != NULL)
    {
      threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
			    lwpid_of (thread_stuck));
      return;
    }

  /* wait_1 below switches current_thread; restore it on exit.  */
  scoped_restore_current_thread restore_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread ([this] (thread_info *thread)
    {
      move_out_of_jump_pad (thread);
    });

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait event loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  The matching decrement happens in the
	     unsuspend_all_lwps call below, after the loop ends.  */
	  lwp_suspended_inc (lwp);

	  /* A real signal (or an explicit stop request) must not be
	     lost while we shuffle the thread around; queue it for
	     later delivery.  */
	  if (ourstatus.sig () != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  /* Sanity check: with debug output enabled, report any thread that
     ended up stuck in a jump pad despite the effort above.  */
  if (debug_threads)
    {
      thread_stuck = find_thread ([this] (thread_info *thread)
	{
	  return stuck_in_jump_pad (thread);
	});

      if (thread_stuck != NULL)
	threads_debug_printf
	  ("couldn't stabilize, LWP %ld got stuck in jump pad",
	   lwpid_of (thread_stuck));
    }
}
2882
582511be
PA
2883/* Convenience function that is called when the kernel reports an
2884 event that is not passed out to GDB. */
2885
2886static ptid_t
2887ignore_event (struct target_waitstatus *ourstatus)
2888{
2889 /* If we got an event, there may still be others, as a single
2890 SIGCHLD can indicate more than one child stopped. This forces
2891 another target_wait call. */
2892 async_file_mark ();
2893
183be222 2894 ourstatus->set_ignore ();
582511be
PA
2895 return null_ptid;
2896}
2897
fd000fb3
TBA
2898ptid_t
2899linux_process_target::filter_exit_event (lwp_info *event_child,
2900 target_waitstatus *ourstatus)
65706a29
PA
2901{
2902 struct thread_info *thread = get_lwp_thread (event_child);
2903 ptid_t ptid = ptid_of (thread);
2904
48989498
PA
2905 /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
2906 if a non-leader thread exits with a signal, we'd report it to the
2907 core which would interpret it as the whole-process exiting.
2908 There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind. */
2909 if (ourstatus->kind () != TARGET_WAITKIND_EXITED
2910 && ourstatus->kind () != TARGET_WAITKIND_SIGNALLED)
2911 return ptid;
2912
8a841a35 2913 if (!is_leader (thread))
65706a29 2914 {
48989498 2915 if (report_exit_events_for (thread))
183be222 2916 ourstatus->set_thread_exited (0);
65706a29 2917 else
183be222 2918 ourstatus->set_ignore ();
65706a29
PA
2919
2920 delete_lwp (event_child);
2921 }
2922 return ptid;
2923}
2924
82075af2
JS
2925/* Returns 1 if GDB is interested in any event_child syscalls. */
2926
2927static int
2928gdb_catching_syscalls_p (struct lwp_info *event_child)
2929{
2930 struct thread_info *thread = get_lwp_thread (event_child);
2931 struct process_info *proc = get_thread_process (thread);
2932
f27866ba 2933 return !proc->syscalls_to_catch.empty ();
82075af2
JS
2934}
2935
9eedd27d
TBA
2936bool
2937linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
82075af2 2938{
4cc32bec 2939 int sysno;
82075af2
JS
2940 struct thread_info *thread = get_lwp_thread (event_child);
2941 struct process_info *proc = get_thread_process (thread);
2942
f27866ba 2943 if (proc->syscalls_to_catch.empty ())
9eedd27d 2944 return false;
82075af2 2945
f27866ba 2946 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
9eedd27d 2947 return true;
82075af2 2948
4cc32bec 2949 get_syscall_trapinfo (event_child, &sysno);
f27866ba
SM
2950
2951 for (int iter : proc->syscalls_to_catch)
82075af2 2952 if (iter == sysno)
9eedd27d 2953 return true;
82075af2 2954
9eedd27d 2955 return false;
82075af2
JS
2956}
2957
d16f3f6c
TBA
/* Do the actual low-level wait for PTID, storing the result in
   OURSTATUS.  Worker for linux_process_target::wait.  Handles all
   internal bookkeeping before deciding whether the event should be
   reported to GDB: step-over completion, internal breakpoints,
   fast-tracepoint jump pads, and signals GDB asked us to pass
   through.  Returns the ptid of the thread the event is for, or
   null_ptid when nothing is reported (OURSTATUS is then
   TARGET_WAITKIND_IGNORE or TARGET_WAITKIND_NO_RESUMED).  */

ptid_t
linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
			      target_wait_flags target_options)
{
  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  client_state &cs = get_client_state ();
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;
  int in_step_range;
  int any_resumed;

  threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

  bp_explains_trap = 0;
  trace_event = 0;
  in_step_range = 0;
  ourstatus->set_ignore ();

  auto status_pending_p_any = [&] (thread_info *thread)
    {
      return status_pending_p_callback (thread, minus_one_ptid);
    };

  auto not_stopped = [&] (thread_info *thread)
    {
      return not_stopped_callback (thread, minus_one_ptid);
    };

  /* Find a resumed LWP, if any.  */
  if (find_thread (status_pending_p_any) != NULL)
    any_resumed = 1;
  else if (find_thread (not_stopped) != NULL)
    any_resumed = 1;
  else
    any_resumed = 0;

  /* While a step-over is in progress, only events from the stepping
     thread may be consumed; wait for it specifically, and blocking.  */
  if (step_over_bkpt == null_ptid)
    pid = wait_for_event (ptid, &w, options);
  else
    {
      threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
			    target_pid_to_str (step_over_bkpt).c_str ());
      pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0 || (pid == -1 && !any_resumed))
    {
      gdb_assert (target_options & TARGET_WNOHANG);

      threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");

      ourstatus->set_ignore ();
      return null_ptid;
    }
  else if (pid == -1)
    {
      threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");

      ourstatus->set_no_resumed ();
      return null_ptid;
    }

  event_child = get_thread_lwp (current_thread);

  /* wait_for_event only returns an exit status for the last
     child of a process.  Report it.  */
  if (WIFEXITED (w) || WIFSIGNALED (w))
    {
      if (WIFEXITED (w))
	{
	  ourstatus->set_exited (WEXITSTATUS (w));

	  threads_debug_printf
	    ("ret = %s, exited with retcode %d",
	     target_pid_to_str (ptid_of (current_thread)).c_str (),
	     WEXITSTATUS (w));
	}
      else
	{
	  ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));

	  threads_debug_printf
	    ("ret = %s, terminated with signal %d",
	     target_pid_to_str (ptid_of (current_thread)).c_str (),
	     WTERMSIG (w));
	}

      return filter_exit_event (event_child, ourstatus);
    }

  /* If step-over executes a breakpoint instruction, in the case of a
     hardware single step it means a gdb/gdbserver breakpoint had been
     planted on top of a permanent breakpoint, in the case of a software
     single step it may just mean that gdbserver hit the reinsert breakpoint.
     The PC has been adjusted by save_stop_reason to point at
     the breakpoint address.
     So in the case of the hardware single step advance the PC manually
     past the breakpoint and in the case of software single step advance only
     if it's not the single_step_breakpoint we are hitting.
     This avoids that a program would keep trapping a permanent breakpoint
     forever.  */
  if (step_over_bkpt != null_ptid
      && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && (event_child->stepping
	  || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
    {
      int increment_pc = 0;
      int breakpoint_kind = 0;
      CORE_ADDR stop_pc = event_child->stop_pc;

      breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
      sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);

      threads_debug_printf
	("step-over for %s executed software breakpoint",
	 target_pid_to_str (ptid_of (current_thread)).c_str ());

      if (increment_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);

	  event_child->stop_pc += increment_pc;
	  low_set_pc (regcache, event_child->stop_pc);

	  if (!low_breakpoint_at (event_child->stop_pc))
	    event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (low_supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && low_breakpoint_at (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any single-step
	 breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	threads_debug_printf ("Hit a gdbserver breakpoint.");
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* If we're collecting a fast tracepoint, finish the collection and
     move out of the jump pad before delivering a signal.  See
     linux_stabilize_threads.  */

  if (WIFSTOPPED (w)
      && WSTOPSIG (w) != SIGTRAP
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      threads_debug_printf ("Got signal %d for LWP %ld.  Check if we need "
			    "to defer or adjust it.",
			    WSTOPSIG (w), lwpid_of (current_thread));

      /* Allow debugging the jump pad itself.  */
      if (current_thread->last_resume_kind != resume_step
	  && maybe_move_out_of_jump_pad (event_child, &w))
	{
	  enqueue_one_deferred_signal (event_child, &w);

	  threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
				WSTOPSIG (w), lwpid_of (current_thread));

	  resume_one_lwp (event_child, 0, 0, NULL);

	  return ignore_event (ourstatus);
	}
    }

  if (event_child->collecting_fast_tracepoint
      != fast_tpoint_collect_result::not_collecting)
    {
      threads_debug_printf
	("LWP %ld was trying to move out of the jump pad (%d).  "
	 "Check if we're already there.",
	 lwpid_of (current_thread),
	 (int) event_child->collecting_fast_tracepoint);

      trace_event = 1;

      event_child->collecting_fast_tracepoint
	= linux_fast_tracepoint_collecting (event_child, NULL);

      if (event_child->collecting_fast_tracepoint
	  != fast_tpoint_collect_result::before_insn)
	{
	  /* No longer need this breakpoint.  */
	  if (event_child->exit_jump_pad_bkpt != NULL)
	    {
	      threads_debug_printf
		("No longer need exit-jump-pad bkpt; removing it."
		 "stopping all threads momentarily.");

	      /* Other running threads could hit this breakpoint.
		 We don't handle moribund locations like GDB does,
		 instead we always pause all threads when removing
		 breakpoints, so that any step-over or
		 decr_pc_after_break adjustment is always taken
		 care of while the breakpoint is still
		 inserted.  */
	      stop_all_lwps (1, event_child);

	      delete_breakpoint (event_child->exit_jump_pad_bkpt);
	      event_child->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, event_child);

	      gdb_assert (event_child->suspended >= 0);
	    }
	}

      if (event_child->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting)
	{
	  threads_debug_printf
	    ("fast tracepoint finished collecting successfully.");

	  /* We may have a deferred signal to report.  */
	  if (dequeue_one_deferred_signal (event_child, &w))
	    threads_debug_printf ("dequeued one signal.");
	  else
	    {
	      threads_debug_printf ("no deferred signals.");

	      if (stabilizing_threads)
		{
		  ourstatus->set_stopped (GDB_SIGNAL_0);

		  threads_debug_printf
		    ("ret = %s, stopped while stabilizing threads",
		     target_pid_to_str (ptid_of (current_thread)).c_str ());

		  return ptid_of (current_thread);
		}
	    }
	}
    }

  /* Check whether GDB would be interested in this event.  */

  /* Check if GDB is interested in this syscall.  */
  if (WIFSTOPPED (w)
      && WSTOPSIG (w) == SYSCALL_SIGTRAP
      && !gdb_catch_this_syscall (event_child))
    {
      threads_debug_printf ("Ignored syscall for LWP %ld.",
			    lwpid_of (current_thread));

      resume_one_lwp (event_child, event_child->stepping, 0, NULL);

      return ignore_event (ourstatus);
    }

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler.  Also never ignore signals that could be caused by a
     breakpoint.  */
  if (WIFSTOPPED (w)
      && current_thread->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
	  (current_process ()->priv->thread_db != NULL
	   && (WSTOPSIG (w) == __SIGRTMIN
	       || WSTOPSIG (w) == __SIGRTMIN + 1))
	  ||
#endif
	  (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
	   && !(WSTOPSIG (w) == SIGSTOP
		&& current_thread->last_resume_kind == resume_stop)
	   && !linux_wstatus_maybe_breakpoint (w))))
    {
      siginfo_t info, *info_p;

      threads_debug_printf ("Ignored signal %d for LWP %ld.",
			    WSTOPSIG (w), lwpid_of (current_thread));

      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
		  (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      if (step_over_finished)
	{
	  /* We cancelled this thread's step-over above.  We still
	     need to unsuspend all other LWPs, and set them back
	     running again while the signal handler runs.  */
	  unsuspend_all_lwps (event_child);

	  /* Enqueue the pending signal info so that proceed_all_lwps
	     doesn't lose it.  */
	  enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);

	  proceed_all_lwps ();
	}
      else
	{
	  resume_one_lwp (event_child, event_child->stepping,
			  WSTOPSIG (w), info_p);
	}

      return ignore_event (ourstatus);
    }

  /* Note that all addresses are always "out of the step range" when
     there's no range to begin with.  */
  in_step_range = lwp_in_step_range (event_child);

  /* If GDB wanted this thread to single step, and the thread is out
     of the step range, we always want to report the SIGTRAP, and let
     GDB handle it.  Watchpoints should always be reported.  So should
     signals we can't explain.  A SIGTRAP we can't explain could be a
     GDB breakpoint --- we may or not support Z0 breakpoints.  If we
     do, we're be able to handle GDB breakpoints on top of internal
     breakpoints, by handling the internal breakpoint and still
     reporting the event to GDB.  If we don't, we're out of luck, GDB
     won't see the breakpoint hit.  If we see a single-step event but
     the thread should be continuing, don't pass the trap to gdb.
     That indicates that we had previously finished a single-step but
     left the single-step pending -- see
     complete_ongoing_step_over.  */
  report_to_gdb = (!maybe_internal_trap
		   || (current_thread->last_resume_kind == resume_step
		       && !in_step_range)
		   || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
		   || (!in_step_range
		       && !bp_explains_trap
		       && !trace_event
		       && !step_over_finished
		       && !(current_thread->last_resume_kind == resume_continue
			    && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
		   || (gdb_breakpoint_here (event_child->stop_pc)
		       && gdb_condition_true_at_breakpoint (event_child->stop_pc)
		       && gdb_no_commands_at_breakpoint (event_child->stop_pc))
		   || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);

  run_breakpoint_commands (event_child->stop_pc);

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (bp_explains_trap)
	threads_debug_printf ("Hit a gdbserver breakpoint.");

      if (step_over_finished)
	threads_debug_printf ("Step-over finished.");

      if (trace_event)
	threads_debug_printf ("Tracepoint event.");

      if (lwp_in_step_range (event_child))
	threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
			      paddress (event_child->stop_pc),
			      paddress (event_child->step_range_start),
			      paddress (event_child->step_range_end));

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (low_supports_breakpoints ())
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc);
	}

      if (step_over_finished)
	{
	  /* If we have finished stepping over a breakpoint, we've
	     stopped and suspended all LWPs momentarily except the
	     stepping one.  This is where we resume them all again.
	     We're going to keep waiting, so use proceed, which
	     handles stepping over the next breakpoint.  */
	  unsuspend_all_lwps (event_child);
	}
      else
	{
	  /* Remove the single-step breakpoints if any.  Note that
	     there isn't single-step breakpoint if we finished stepping
	     over.  */
	  if (supports_software_single_step ()
	      && has_single_step_breakpoints (current_thread))
	    {
	      stop_all_lwps (0, event_child);
	      delete_single_step_breakpoints (current_thread);
	      unstop_all_lwps (0, event_child);
	    }
	}

      threads_debug_printf ("proceeding all threads.");

      proceed_all_lwps ();

      return ignore_event (ourstatus);
    }

  if (debug_threads)
    {
      if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
	threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
			      lwpid_of (get_lwp_thread (event_child)),
			      event_child->waitstatus.to_string ().c_str ());

      if (current_thread->last_resume_kind == resume_step)
	{
	  if (event_child->step_range_start == event_child->step_range_end)
	    threads_debug_printf
	      ("GDB wanted to single-step, reporting event.");
	  else if (!lwp_in_step_range (event_child))
	    threads_debug_printf ("Out of step range, reporting event.");
	}

      if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
	threads_debug_printf ("Stopped by watchpoint.");
      else if (gdb_breakpoint_here (event_child->stop_pc))
	threads_debug_printf ("Stopped by GDB breakpoint.");
    }

  threads_debug_printf ("Hit a non-gdbserver trap event.");

  /* Alright, we're going to report a stop.  */

  /* Remove single-step breakpoints.  */
  if (supports_software_single_step ())
    {
      /* Remove single-step breakpoints or not.  It it is true, stop all
	 lwps, so that other threads won't hit the breakpoint in the
	 staled memory.  */
      int remove_single_step_breakpoints_p = 0;

      if (non_stop)
	{
	  remove_single_step_breakpoints_p
	    = has_single_step_breakpoints (current_thread);
	}
      else
	{
	  /* In all-stop, a stop reply cancels all previous resume
	     requests.  Delete all single-step breakpoints.  */

	  find_thread ([&] (thread_info *thread) {
	    if (has_single_step_breakpoints (thread))
	      {
		remove_single_step_breakpoints_p = 1;
		return true;
	      }

	    return false;
	  });
	}

      if (remove_single_step_breakpoints_p)
	{
	  /* If we remove single-step breakpoints from memory, stop all lwps,
	     so that other threads won't hit the breakpoint in the staled
	     memory.  */
	  stop_all_lwps (0, event_child);

	  if (non_stop)
	    {
	      gdb_assert (has_single_step_breakpoints (current_thread));
	      delete_single_step_breakpoints (current_thread);
	    }
	  else
	    {
	      for_each_thread ([] (thread_info *thread){
		if (has_single_step_breakpoints (thread))
		  delete_single_step_breakpoints (thread);
	      });
	    }

	  unstop_all_lwps (0, event_child);
	}
    }

  if (!stabilizing_threads)
    {
      /* In all-stop, stop all threads.  */
      if (!non_stop)
	stop_all_lwps (0, NULL);

      if (step_over_finished)
	{
	  if (!non_stop)
	    {
	      /* If we were doing a step-over, all other threads but
		 the stepping one had been paused in start_step_over,
		 with their suspend counts incremented.  We don't want
		 to do a full unstop/unpause, because we're in
		 all-stop mode (so we want threads stopped), but we
		 still need to unsuspend the other threads, to
		 decrement their `suspended' count back.  */
	      unsuspend_all_lwps (event_child);
	    }
	  else
	    {
	      /* If we just finished a step-over, then all threads had
		 been momentarily paused.  In all-stop, that's fine,
		 we want threads stopped by now anyway.  In non-stop,
		 we need to re-resume threads that GDB wanted to be
		 running.  */
	      unstop_all_lwps (1, event_child);
	    }
	}

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid == minus_one_ptid)
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  /* current_thread and event_child must stay in sync.  */
	  switch_to_thread (get_lwp_thread (event_child));

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}


      /* Stabilize threads (move out of jump pads).  */
      if (!non_stop)
	target_stabilize_threads ();
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (1, event_child);
    }

  /* At this point, we haven't set OURSTATUS.  This is where we do it.  */
  gdb_assert (ourstatus->kind () == TARGET_WAITKIND_IGNORE);

  if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    {
      /* If the reported event is an exit, fork, vfork, clone or exec,
	 let GDB know.  */

      /* Break the unreported fork/vfork/clone relationship chain.  */
      if (is_new_child_status (event_child->waitstatus.kind ()))
	{
	  event_child->relative->relative = NULL;
	  event_child->relative = NULL;
	}

      *ourstatus = event_child->waitstatus;
      /* Clear the event lwp's waitstatus since we handled it already.  */
      event_child->waitstatus.set_ignore ();
    }
  else
    {
      /* The LWP stopped due to a plain signal or a syscall signal.  Either way,
	 event_child->waitstatus wasn't filled in with the details, so look at
	 the wait status W.  */
      if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
	{
	  int syscall_number;

	  get_syscall_trapinfo (event_child, &syscall_number);
	  if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
	    ourstatus->set_syscall_entry (syscall_number);
	  else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
	    ourstatus->set_syscall_return (syscall_number);
	  else
	    gdb_assert_not_reached ("unexpected syscall state");
	}
      else if (current_thread->last_resume_kind == resume_stop
	       && WSTOPSIG (w) == SIGSTOP)
	{
	  /* A thread that has been requested to stop by GDB with vCont;t,
	     and it stopped cleanly, so report as SIG0.  The use of
	     SIGSTOP is an implementation detail.  */
	  ourstatus->set_stopped (GDB_SIGNAL_0);
	}
      else
	ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
    }

  /* Now that we've selected our final event LWP, un-adjust its PC if
     it was a software breakpoint, and the client doesn't know we can
     adjust the breakpoint ourselves.  */
  if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && !cs.swbreak_feature)
    {
      int decr_pc = low_decr_pc_after_break ();

      if (decr_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc + decr_pc);
	}
    }

  gdb_assert (step_over_bkpt == null_ptid);

  threads_debug_printf ("ret = %s, %s",
			target_pid_to_str (ptid_of (current_thread)).c_str (),
			ourstatus->to_string ().c_str ());

  return filter_exit_event (event_child, ourstatus);
}
3627
/* Get rid of any pending event in the pipe.  Called before waiting,
   so that a stale wakeup token does not cause a spurious event-loop
   iteration after the corresponding event has been consumed.  */

static void
async_file_flush (void)
{
  linux_event_pipe.flush ();
}
3634
/* Put something in the pipe, so the event loop wakes up and calls
   back into the target to pull the pending event.  */

static void
async_file_mark (void)
{
  linux_event_pipe.mark ();
}
3641
/* Target hook: wait for an event from the inferior matching PTID.
   Fills OURSTATUS with the event details and returns the ptid of the
   thread it happened to.  With TARGET_WNOHANG, may return immediately
   with TARGET_WAITKIND_IGNORE; otherwise loops until a real event
   arrives.  */

ptid_t
linux_process_target::wait (ptid_t ptid,
			    target_waitstatus *ourstatus,
			    target_wait_flags target_options)
{
  ptid_t event_ptid;

  /* Flush the async file first.  */
  if (target_is_async_p ())
    async_file_flush ();

  do
    {
      event_ptid = wait_1 (ptid, ourstatus, target_options);
    }
  while ((target_options & TARGET_WNOHANG) == 0
	 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);

  /* If at least one stop was reported, there may be more.  A single
     SIGCHLD can signal more than one child stop.  */
  if (target_is_async_p ()
      && (target_options & TARGET_WNOHANG) != 0
      && event_ptid != null_ptid)
    async_file_mark ();

  return event_ptid;
}
3669
c5f62d5f 3670/* Send a signal to an LWP. */
fd500816
DJ
3671
3672static int
a1928bad 3673kill_lwp (unsigned long lwpid, int signo)
fd500816 3674{
4a6ed09b 3675 int ret;
fd500816 3676
4a6ed09b
PA
3677 errno = 0;
3678 ret = syscall (__NR_tkill, lwpid, signo);
3679 if (errno == ENOSYS)
3680 {
3681 /* If tkill fails, then we are not using nptl threads, a
3682 configuration we no longer support. */
3683 perror_with_name (("tkill"));
3684 }
3685 return ret;
fd500816
DJ
3686}
3687
/* Request a stop of LWP by sending it a SIGSTOP (see send_sigstop).
   Exported entry point used by code outside this file.  */

void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3693
0d62e5e8 3694static void
02fc4de7 3695send_sigstop (struct lwp_info *lwp)
0d62e5e8 3696{
bd99dc85 3697 int pid;
0d62e5e8 3698
d86d4aaf 3699 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3700
0d62e5e8
DJ
3701 /* If we already have a pending stop signal for this process, don't
3702 send another. */
54a0b537 3703 if (lwp->stop_expected)
0d62e5e8 3704 {
c058728c 3705 threads_debug_printf ("Have pending sigstop for lwp %d", pid);
ae13219e 3706
0d62e5e8
DJ
3707 return;
3708 }
3709
c058728c 3710 threads_debug_printf ("Sending sigstop to lwp %d", pid);
0d62e5e8 3711
d50171e4 3712 lwp->stop_expected = 1;
bd99dc85 3713 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3714}
3715
df3e4dbe
SM
3716static void
3717send_sigstop (thread_info *thread, lwp_info *except)
02fc4de7 3718{
d86d4aaf 3719 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3720
7984d532
PA
3721 /* Ignore EXCEPT. */
3722 if (lwp == except)
df3e4dbe 3723 return;
7984d532 3724
02fc4de7 3725 if (lwp->stopped)
df3e4dbe 3726 return;
02fc4de7
PA
3727
3728 send_sigstop (lwp);
7984d532
PA
3729}
3730
/* Increment the suspend count of an LWP, and stop it, if not stopped
   yet.  Used by stop_all_lwps when the caller wants the threads to
   stay suspended (not just stopped) afterwards.  */

static void
suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* Ignore EXCEPT.  */
  if (lwp == except)
    return;

  /* Bump the suspend count even if the LWP is already stopped;
     send_sigstop below only signals it if needed.  */
  lwp_suspended_inc (lwp);

  send_sigstop (thread, except);
}
3746
/* Record that LWP has exited (WSTAT is the raw wait status).  The LWP
   stays on the list with a pending status so the exit event can still
   be reported to GDB; it is marked stopped so nothing tries to signal
   it again.  */

static void
mark_lwp_dead (struct lwp_info *lwp, int wstat)
{
  /* Store the exit status for later.  */
  lwp->status_pending_p = 1;
  lwp->status_pending = wstat;

  /* Store in waitstatus as well, as there's nothing else to process
     for this event.  */
  if (WIFEXITED (wstat))
    lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));

  /* Prevent trying to stop it.  */
  lwp->stopped = 1;

  /* No further stops are expected from a dead lwp.  */
  lwp->stop_expected = 0;
}
3767
00db26fa
PA
3768/* Return true if LWP has exited already, and has a pending exit event
3769 to report to GDB. */
3770
3771static int
3772lwp_is_marked_dead (struct lwp_info *lwp)
3773{
3774 return (lwp->status_pending_p
3775 && (WIFEXITED (lwp->status_pending)
3776 || WIFSIGNALED (lwp->status_pending)));
3777}
3778
/* Wait until every LWP we sent a SIGSTOP to has actually stopped,
   leaving all resulting events pending.  If the previously current
   thread died while we waited, deliberately leave no thread selected
   instead of restoring it.  */

void
linux_process_target::wait_for_sigstop ()
{
  struct thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  scoped_restore_current_thread restore_thread;

  threads_debug_printf ("pulling events");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
  gdb_assert (ret == -1);

  if (saved_thread == NULL || mythread_alive (saved_tid))
    return;
  else
    {
      threads_debug_printf ("Previously current thread died.");

      /* We can't change the current inferior behind GDB's back,
	 otherwise, a subsequent command may apply to the wrong
	 process.  */
      restore_thread.dont_restore ();
      switch_to_thread (nullptr);
    }
}
3816
/* Return true if THREAD is stopped inside a fast tracepoint jump pad
   in a spot that needs stabilizing before we can report the stop to
   GDB (collect still in progress, and stopped somewhere GDB cares
   about: a GDB breakpoint, a watchpoint hit, or a step request).  */

bool
linux_process_target::stuck_in_jump_pad (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  if (lwp->suspended != 0)
    {
      internal_error ("LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* Allow debugging the jump pad, gdb_collect, etc..  */
  return (supports_fast_tracepoints ()
	  && agent_loaded_p ()
	  && (gdb_breakpoint_here (lwp->stop_pc)
	      || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
	      || thread->last_resume_kind == resume_step)
	  && (linux_fast_tracepoint_collecting (lwp, NULL)
	      != fast_tpoint_collect_result::not_collecting));
}
3838
d16f3f6c
TBA
3839void
3840linux_process_target::move_out_of_jump_pad (thread_info *thread)
fa593d66 3841{
d86d4aaf 3842 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3843 int *wstat;
3844
863d01bd
PA
3845 if (lwp->suspended != 0)
3846 {
f34652de 3847 internal_error ("LWP %ld is suspended, suspended=%d\n",
863d01bd
PA
3848 lwpid_of (thread), lwp->suspended);
3849 }
fa593d66
PA
3850 gdb_assert (lwp->stopped);
3851
f0ce0d3a 3852 /* For gdb_breakpoint_here. */
24583e45
TBA
3853 scoped_restore_current_thread restore_thread;
3854 switch_to_thread (thread);
f0ce0d3a 3855
fa593d66
PA
3856 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3857
3858 /* Allow debugging the jump pad, gdb_collect, etc. */
3859 if (!gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3860 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
3861 && thread->last_resume_kind != resume_step
3862 && maybe_move_out_of_jump_pad (lwp, wstat))
3863 {
c058728c
SM
3864 threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
3865 lwpid_of (thread));
fa593d66
PA
3866
3867 if (wstat)
3868 {
3869 lwp->status_pending_p = 0;
3870 enqueue_one_deferred_signal (lwp, wstat);
3871
c058728c
SM
3872 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad",
3873 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
3874 }
3875
df95181f 3876 resume_one_lwp (lwp, 0, 0, NULL);
fa593d66
PA
3877 }
3878 else
863d01bd 3879 lwp_suspended_inc (lwp);
fa593d66
PA
3880}
3881
5a6b0a41
SM
3882static bool
3883lwp_running (thread_info *thread)
fa593d66 3884{
d86d4aaf 3885 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3886
00db26fa 3887 if (lwp_is_marked_dead (lwp))
5a6b0a41
SM
3888 return false;
3889
3890 return !lwp->stopped;
fa593d66
PA
3891}
3892
/* Stop every LWP except EXCEPT (which may be NULL), waiting until all
   of them have reported a stop.  With SUSPEND non-zero each stopped
   LWP's suspend count is also incremented.  The global
   stopping_threads state is set for the duration so event-handling
   code knows stops are expected.  */

void
linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
{
  /* Should not be called recursively.  */
  gdb_assert (stopping_threads == NOT_STOPPING_THREADS);

  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  threads_debug_printf
    ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
     (except != NULL
      ? target_pid_to_str (ptid_of (get_lwp_thread (except))).c_str ()
      : "none"));

  stopping_threads = (suspend
		      ? STOPPING_AND_SUSPENDING_THREADS
		      : STOPPING_THREADS);

  if (suspend)
    for_each_thread ([&] (thread_info *thread)
      {
	suspend_and_send_sigstop (thread, except);
      });
  else
    for_each_thread ([&] (thread_info *thread)
      {
	send_sigstop (thread, except);
      });

  /* Block until every signalled LWP has actually stopped.  */
  wait_for_sigstop ();
  stopping_threads = NOT_STOPPING_THREADS;

  threads_debug_printf ("setting stopping_threads back to !stopping");
}
3927
863d01bd
PA
3928/* Enqueue one signal in the chain of signals which need to be
3929 delivered to this process on next resume. */
3930
3931static void
3932enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3933{
013e3554
TBA
3934 lwp->pending_signals.emplace_back (signal);
3935 if (info == nullptr)
3936 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
863d01bd 3937 else
013e3554 3938 lwp->pending_signals.back ().info = *info;
863d01bd
PA
3939}
3940
/* Plant single-step breakpoints at every possible next PC of LWP, as
   computed by the low target's low_get_next_pcs.  Used on targets
   without hardware single-step.  */

void
linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct regcache *regcache = get_thread_regcache (thread, 1);

  scoped_restore_current_thread restore_thread;

  switch_to_thread (thread);
  std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);

  for (CORE_ADDR pc : next_pcs)
    set_single_step_breakpoint (pc, current_ptid);
}
3955
df95181f
TBA
3956int
3957linux_process_target::single_step (lwp_info* lwp)
7fe5e27e
AT
3958{
3959 int step = 0;
3960
b31cdfa6 3961 if (supports_hardware_single_step ())
7fe5e27e
AT
3962 {
3963 step = 1;
3964 }
7582c77c 3965 else if (supports_software_single_step ())
7fe5e27e
AT
3966 {
3967 install_software_single_step_breakpoints (lwp);
3968 step = 0;
3969 }
3970 else
c058728c 3971 threads_debug_printf ("stepping is not implemented on this target");
7fe5e27e
AT
3972
3973 return step;
3974}
3975
35ac8b3e 3976/* The signal can be delivered to the inferior if we are not trying to
5b061e98
YQ
3977 finish a fast tracepoint collect. Since signal can be delivered in
3978 the step-over, the program may go to signal handler and trap again
3979 after return from the signal handler. We can live with the spurious
3980 double traps. */
35ac8b3e
YQ
3981
3982static int
3983lwp_signal_can_be_delivered (struct lwp_info *lwp)
3984{
229d26fc
SM
3985 return (lwp->collecting_fast_tracepoint
3986 == fast_tpoint_collect_result::not_collecting);
35ac8b3e
YQ
3987}
3988
/* Resume execution of LWP, stepping if STEP is non-zero, delivering
   SIGNAL (with optional siginfo INFO) if it can be delivered now, or
   enqueueing it otherwise.  May return without resuming (LWP not
   stopped, or has a pending status).  Throws on ptrace failure; see
   resume_one_lwp for the wrapper that handles vanished LWPs.  */

void
linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
					    int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int ptrace_request;
  struct process_info *proc = get_thread_process (thread);

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || !lwp->pending_signals.empty ()
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Not resuming lwp %ld (%s, stop %s); has pending status",
	 lwpid_of (thread), step ? "step" : "continue",
	 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      threads_debug_printf ("  pending reinsert at 0x%s",
			    paddress (lwp->bp_reinsert));

      if (supports_hardware_single_step ())
	{
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    threads_debug_printf
      ("lwp %ld wants to get out of fast tracepoint jump pad "
       "(exit-jump-pad-bkpt)", lwpid_of (thread));

  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      threads_debug_printf
	("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
	 lwpid_of (thread));

      if (supports_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error ("moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      threads_debug_printf
	("lwp %ld has a while-stepping action -> forcing step.",
	 lwpid_of (thread));

      step = single_step (lwp);
    }

  if (proc->tdesc != NULL && low_supports_breakpoints ())
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      lwp->stop_pc = low_get_pc (regcache);

      threads_debug_printf ("  %s from pc 0x%lx", step ? "step" : "continue",
			    (long) lwp->stop_pc);
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
    {
      const pending_signal &p_sig = lwp->pending_signals.front ();

      signal = p_sig.signal;
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals.pop_front ();
    }

  threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
			lwpid_of (thread), step ? "step" : "continue", signal,
			lwp->stop_expected ? "expected" : "not expected");

  low_prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  if (errno)
    {
      /* Save errno across the debug print, which may clobber it.  */
      int saved_errno = errno;

      threads_debug_printf ("ptrace errno = %d (%s)",
			    saved_errno, strerror (saved_errno));

      errno = saved_errno;
      perror_with_name ("resuming thread");
    }

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
4184
/* Hook called just before resuming LWP.  The base implementation has
   nothing to do; architecture-specific targets may override it.  */

void
linux_process_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* Nop.  */
}
4190
/* Called when we try to resume a stopped LWP and that errors out.  If
   the LWP is no longer in ptrace-stopped state (meaning it's zombie,
   or about to become), discard the error, clear any pending status
   the LWP may have, and return true (we'll collect the exit status
   soon enough).  Otherwise, return false.  */

static int
check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
{
  struct thread_info *thread = get_lwp_thread (lp);

  /* If we get an error after resuming the LWP successfully, we'd
     confuse !T state for the LWP being gone.  */
  gdb_assert (lp->stopped);

  /* We can't just check whether the LWP is in 'Z (Zombie)' state,
     because even if ptrace failed with ESRCH, the tracee may be "not
     yet fully dead", but already refusing ptrace requests.  In that
     case the tracee has 'R (Running)' state for a little bit
     (observed in Linux 3.18).  See also the note on ESRCH in the
     ptrace(2) man page.  Instead, check whether the LWP has any state
     other than ptrace-stopped.  */

  /* Don't assume anything if /proc/PID/status can't be read.  */
  if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
    {
      /* Not ptrace-stopped anymore: treat the LWP as gone.  */
      lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
      lp->status_pending_p = 0;
      return 1;
    }
  return 0;
}
4223
/* Like resume_one_lwp_throw, but swallows the error if the LWP turns
   out to have already vanished (zombie / reaped by the kernel); any
   other error is re-thrown.  */

void
linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
				      siginfo_t *info)
{
  try
    {
      resume_one_lwp_throw (lwp, step, signal, info);
    }
  catch (const gdb_exception_error &ex)
    {
      if (check_ptrace_stopped_lwp_gone (lwp))
	{
	  /* This could be because we tried to resume an LWP after its leader
	     exited.  Mark it as resumed, so we can collect an exit event
	     from it.  */
	  lwp->stopped = 0;
	  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
      else
	throw;
    }
}
4246
5fdda392
SM
4247/* This function is called once per thread via for_each_thread.
4248 We look up which resume request applies to THREAD and mark it with a
4249 pointer to the appropriate resume request.
5544ad89
DJ
4250
4251 This algorithm is O(threads * resume elements), but resume elements
4252 is small (and will remain small at least until GDB supports thread
4253 suspension). */
ebcf782c 4254
5fdda392
SM
4255static void
4256linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
0d62e5e8 4257{
d86d4aaf 4258 struct lwp_info *lwp = get_thread_lwp (thread);
64386c31 4259
5fdda392 4260 for (int ndx = 0; ndx < n; ndx++)
95954743 4261 {
5fdda392 4262 ptid_t ptid = resume[ndx].thread;
d7e15655 4263 if (ptid == minus_one_ptid
9c80ecd6 4264 || ptid == thread->id
0c9070b3
YQ
4265 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4266 of PID'. */
e99b03dc 4267 || (ptid.pid () == pid_of (thread)
0e998d96 4268 && (ptid.is_pid ()
e38504b3 4269 || ptid.lwp () == -1)))
95954743 4270 {
5fdda392 4271 if (resume[ndx].kind == resume_stop
8336d594 4272 && thread->last_resume_kind == resume_stop)
d50171e4 4273 {
c058728c
SM
4274 threads_debug_printf
4275 ("already %s LWP %ld at GDB's request",
4276 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
4277 ? "stopped" : "stopping"),
4278 lwpid_of (thread));
d50171e4
PA
4279
4280 continue;
4281 }
4282
5a04c4cf
PA
4283 /* Ignore (wildcard) resume requests for already-resumed
4284 threads. */
5fdda392 4285 if (resume[ndx].kind != resume_stop
5a04c4cf
PA
4286 && thread->last_resume_kind != resume_stop)
4287 {
c058728c
SM
4288 threads_debug_printf
4289 ("already %s LWP %ld at GDB's request",
4290 (thread->last_resume_kind == resume_step
4291 ? "stepping" : "continuing"),
4292 lwpid_of (thread));
5a04c4cf
PA
4293 continue;
4294 }
4295
393a6b59
PA
4296 /* Don't let wildcard resumes resume fork/vfork/clone
4297 children that GDB does not yet know are new children. */
4298 if (lwp->relative != NULL)
5a04c4cf 4299 {
393a6b59 4300 struct lwp_info *rel = lwp->relative;
5a04c4cf
PA
4301
4302 if (rel->status_pending_p
393a6b59 4303 && is_new_child_status (rel->waitstatus.kind ()))
5a04c4cf 4304 {
c058728c
SM
4305 threads_debug_printf
4306 ("not resuming LWP %ld: has queued stop reply",
4307 lwpid_of (thread));
5a04c4cf
PA
4308 continue;
4309 }
4310 }
4311
4312 /* If the thread has a pending event that has already been
4313 reported to GDBserver core, but GDB has not pulled the
4314 event out of the vStopped queue yet, likewise, ignore the
4315 (wildcard) resume request. */
9c80ecd6 4316 if (in_queued_stop_replies (thread->id))
5a04c4cf 4317 {
c058728c
SM
4318 threads_debug_printf
4319 ("not resuming LWP %ld: has queued stop reply",
4320 lwpid_of (thread));
5a04c4cf
PA
4321 continue;
4322 }
4323
5fdda392 4324 lwp->resume = &resume[ndx];
8336d594 4325 thread->last_resume_kind = lwp->resume->kind;
fa593d66 4326
c2d6af84
PA
4327 lwp->step_range_start = lwp->resume->step_range_start;
4328 lwp->step_range_end = lwp->resume->step_range_end;
4329
fa593d66
PA
4330 /* If we had a deferred signal to report, dequeue one now.
4331 This can happen if LWP gets more than one signal while
4332 trying to get out of a jump pad. */
4333 if (lwp->stopped
4334 && !lwp->status_pending_p
4335 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4336 {
4337 lwp->status_pending_p = 1;
4338
c058728c
SM
4339 threads_debug_printf
4340 ("Dequeueing deferred signal %d for LWP %ld, "
4341 "leaving status pending.",
4342 WSTOPSIG (lwp->status_pending),
4343 lwpid_of (thread));
fa593d66
PA
4344 }
4345
5fdda392 4346 return;
95954743
PA
4347 }
4348 }
2bd7c093
PA
4349
4350 /* No resume action for this thread. */
4351 lwp->resume = NULL;
5544ad89
DJ
4352}
4353
df95181f
TBA
4354bool
4355linux_process_target::resume_status_pending (thread_info *thread)
5544ad89 4356{
d86d4aaf 4357 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4358
bd99dc85
PA
4359 /* LWPs which will not be resumed are not interesting, because
4360 we might not wait for them next time through linux_wait. */
2bd7c093 4361 if (lwp->resume == NULL)
25c28b4d 4362 return false;
64386c31 4363
df95181f 4364 return thread_still_has_status_pending (thread);
d50171e4
PA
4365}
4366
/* Return true if THREAD is stopped at a breakpoint (or fast tracepoint
   jump) that gdbserver itself must step over before resuming it.
   Returns false for threads that are not being resumed, are suspended,
   have pending events, have moved since they stopped, or are stopped
   at a breakpoint GDB expects to hit.  */

bool
linux_process_target::thread_needs_step_over (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
			    lwpid_of (thread));
      return false;
    }

  if (thread->last_resume_kind == resume_stop)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, should remain stopped",
	 lwpid_of (thread));
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
			    lwpid_of (thread));
      return false;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending status.",
	 lwpid_of (thread));
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Cancelling, PC was changed. "
	 "Old stop_pc was 0x%s, PC is now 0x%s", lwpid_of (thread),
	 paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (supports_software_single_step ()
      && !lwp->pending_signals.empty ()
      && lwp_signal_can_be_delivered (lwp))
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending signals.",
	 lwpid_of (thread));

      return false;
    }

  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
				" GDB breakpoint at 0x%s; skipping step over",
				lwpid_of (thread), paddress (pc));

	  return false;
	}
      else
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, "
				"found breakpoint at 0x%s",
				lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  return true;
	}
    }

  threads_debug_printf
    ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
     lwpid_of (thread), paddress (pc));

  return false;
}
4482
/* Begin stepping LWP over the breakpoint at its current PC: stop and
   suspend every other thread, remove the breakpoint (and any fast
   tracepoint jump) at the PC, single-step the LWP, and record in
   step_over_bkpt that the next event must come from this LWP so
   finish_step_over can reinsert the breakpoint.  */

void
linux_process_target::start_step_over (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  CORE_ADDR pc;

  threads_debug_printf ("Starting step-over on LWP %ld.  Stopping all threads",
			lwpid_of (thread));

  stop_all_lwps (1, lwp);

  if (lwp->suspended != 0)
    {
      internal_error ("LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  threads_debug_printf ("Done stopping all threads for step-over.");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  bool step = false;
  {
    scoped_restore_current_thread restore_thread;
    switch_to_thread (thread);

    lwp->bp_reinsert = pc;
    uninsert_breakpoints_at (pc);
    uninsert_fast_tracepoint_jumps_at (pc);

    step = single_step (lwp);
  }

  resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->id;
}
4525
/* Finish a step-over started by start_step_over: reinsert the
   breakpoint (and fast tracepoint jump) that was removed at
   LWP->BP_REINSERT, drop any software single-step breakpoints, and
   clear step_over_bkpt.  Returns true if a step-over was in fact in
   progress for LWP, false otherwise.  */

bool
linux_process_target::finish_step_over (lwp_info *lwp)
{
  if (lwp->bp_reinsert != 0)
    {
      scoped_restore_current_thread restore_thread;

      threads_debug_printf ("Finished step over.");

      switch_to_thread (get_lwp_thread (lwp));

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
	 may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any single-step breakpoints.  No longer needed.  We
	 don't have to worry about other threads hitting this trap,
	 and later not being able to explain it, because we were
	 stepping over a breakpoint, and we hold all threads but
	 LWP stopped while doing that.  */
      if (!supports_hardware_single_step ())
	{
	  gdb_assert (has_single_step_breakpoints (current_thread));
	  delete_single_step_breakpoints (current_thread);
	}

      step_over_bkpt = null_ptid;
      return true;
    }
  else
    return false;
}
4561
d16f3f6c
TBA
4562void
4563linux_process_target::complete_ongoing_step_over ()
863d01bd 4564{
d7e15655 4565 if (step_over_bkpt != null_ptid)
863d01bd
PA
4566 {
4567 struct lwp_info *lwp;
4568 int wstat;
4569 int ret;
4570
c058728c 4571 threads_debug_printf ("detach: step over in progress, finish it first");
863d01bd
PA
4572
4573 /* Passing NULL_PTID as filter indicates we want all events to
4574 be left pending. Eventually this returns when there are no
4575 unwaited-for children left. */
d16f3f6c
TBA
4576 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4577 __WALL);
863d01bd
PA
4578 gdb_assert (ret == -1);
4579
4580 lwp = find_lwp_pid (step_over_bkpt);
4581 if (lwp != NULL)
7e9cf1fe
PA
4582 {
4583 finish_step_over (lwp);
4584
4585 /* If we got our step SIGTRAP, don't leave it pending,
4586 otherwise we would report it to GDB as a spurious
4587 SIGTRAP. */
4588 gdb_assert (lwp->status_pending_p);
4589 if (WIFSTOPPED (lwp->status_pending)
4590 && WSTOPSIG (lwp->status_pending) == SIGTRAP)
4591 {
4592 thread_info *thread = get_lwp_thread (lwp);
4593 if (thread->last_resume_kind != resume_step)
4594 {
c058728c 4595 threads_debug_printf ("detach: discard step-over SIGTRAP");
7e9cf1fe
PA
4596
4597 lwp->status_pending_p = 0;
4598 lwp->status_pending = 0;
4599 resume_one_lwp (lwp, lwp->stepping, 0, NULL);
4600 }
4601 else
c058728c
SM
4602 threads_debug_printf
4603 ("detach: resume_step, not discarding step-over SIGTRAP");
7e9cf1fe
PA
4604 }
4605 }
863d01bd
PA
4606 step_over_bkpt = null_ptid;
4607 unsuspend_all_lwps (lwp);
4608 }
4609}
4610
df95181f
TBA
4611void
4612linux_process_target::resume_one_thread (thread_info *thread,
4613 bool leave_all_stopped)
5544ad89 4614{
d86d4aaf 4615 struct lwp_info *lwp = get_thread_lwp (thread);
d50171e4 4616 int leave_pending;
5544ad89 4617
2bd7c093 4618 if (lwp->resume == NULL)
c80825ff 4619 return;
5544ad89 4620
bd99dc85 4621 if (lwp->resume->kind == resume_stop)
5544ad89 4622 {
c058728c
SM
4623 threads_debug_printf ("resume_stop request for LWP %ld",
4624 lwpid_of (thread));
bd99dc85
PA
4625
4626 if (!lwp->stopped)
4627 {
c058728c 4628 threads_debug_printf ("stopping LWP %ld", lwpid_of (thread));
bd99dc85 4629
d50171e4
PA
4630 /* Stop the thread, and wait for the event asynchronously,
4631 through the event loop. */
02fc4de7 4632 send_sigstop (lwp);
bd99dc85
PA
4633 }
4634 else
4635 {
c058728c 4636 threads_debug_printf ("already stopped LWP %ld", lwpid_of (thread));
d50171e4
PA
4637
4638 /* The LWP may have been stopped in an internal event that
4639 was not meant to be notified back to GDB (e.g., gdbserver
4640 breakpoint), so we should be reporting a stop event in
4641 this case too. */
4642
4643 /* If the thread already has a pending SIGSTOP, this is a
4644 no-op. Otherwise, something later will presumably resume
4645 the thread and this will cause it to cancel any pending
4646 operation, due to last_resume_kind == resume_stop. If
4647 the thread already has a pending status to report, we
4648 will still report it the next time we wait - see
4649 status_pending_p_callback. */
1a981360
PA
4650
4651 /* If we already have a pending signal to report, then
4652 there's no need to queue a SIGSTOP, as this means we're
4653 midway through moving the LWP out of the jumppad, and we
4654 will report the pending signal as soon as that is
4655 finished. */
013e3554 4656 if (lwp->pending_signals_to_report.empty ())
1a981360 4657 send_sigstop (lwp);
bd99dc85 4658 }
32ca6d61 4659
bd99dc85
PA
4660 /* For stop requests, we're done. */
4661 lwp->resume = NULL;
183be222 4662 thread->last_status.set_ignore ();
c80825ff 4663 return;
5544ad89
DJ
4664 }
4665
bd99dc85 4666 /* If this thread which is about to be resumed has a pending status,
863d01bd
PA
4667 then don't resume it - we can just report the pending status.
4668 Likewise if it is suspended, because e.g., another thread is
4669 stepping past a breakpoint. Make sure to queue any signals that
4670 would otherwise be sent. In all-stop mode, we do this decision
4671 based on if *any* thread has a pending status. If there's a
4672 thread that needs the step-over-breakpoint dance, then don't
4673 resume any other thread but that particular one. */
4674 leave_pending = (lwp->suspended
4675 || lwp->status_pending_p
4676 || leave_all_stopped);
5544ad89 4677
0e9a339e
YQ
4678 /* If we have a new signal, enqueue the signal. */
4679 if (lwp->resume->sig != 0)
4680 {
4681 siginfo_t info, *info_p;
4682
4683 /* If this is the same signal we were previously stopped by,
4684 make sure to queue its siginfo. */
4685 if (WIFSTOPPED (lwp->last_status)
4686 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4687 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4688 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4689 info_p = &info;
4690 else
4691 info_p = NULL;
4692
4693 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4694 }
4695
d50171e4 4696 if (!leave_pending)
bd99dc85 4697 {
c058728c 4698 threads_debug_printf ("resuming LWP %ld", lwpid_of (thread));
5544ad89 4699
9c80ecd6 4700 proceed_one_lwp (thread, NULL);
bd99dc85
PA
4701 }
4702 else
c058728c 4703 threads_debug_printf ("leaving LWP %ld stopped", lwpid_of (thread));
5544ad89 4704
183be222 4705 thread->last_status.set_ignore ();
bd99dc85 4706 lwp->resume = NULL;
0d62e5e8
DJ
4707}
4708
0e4d7e35
TBA
4709void
4710linux_process_target::resume (thread_resume *resume_info, size_t n)
0d62e5e8 4711{
d86d4aaf 4712 struct thread_info *need_step_over = NULL;
c6ecbae5 4713
c058728c 4714 THREADS_SCOPED_DEBUG_ENTER_EXIT;
87ce2a04 4715
5fdda392
SM
4716 for_each_thread ([&] (thread_info *thread)
4717 {
4718 linux_set_resume_request (thread, resume_info, n);
4719 });
5544ad89 4720
d50171e4
PA
4721 /* If there is a thread which would otherwise be resumed, which has
4722 a pending status, then don't resume any threads - we can just
4723 report the pending status. Make sure to queue any signals that
4724 would otherwise be sent. In non-stop mode, we'll apply this
4725 logic to each thread individually. We consume all pending events
4726 before considering to start a step-over (in all-stop). */
25c28b4d 4727 bool any_pending = false;
bd99dc85 4728 if (!non_stop)
df95181f
TBA
4729 any_pending = find_thread ([this] (thread_info *thread)
4730 {
4731 return resume_status_pending (thread);
4732 }) != nullptr;
d50171e4
PA
4733
4734 /* If there is a thread which would otherwise be resumed, which is
4735 stopped at a breakpoint that needs stepping over, then don't
4736 resume any threads - have it step over the breakpoint with all
4737 other threads stopped, then resume all threads again. Make sure
4738 to queue any signals that would otherwise be delivered or
4739 queued. */
bf9ae9d8 4740 if (!any_pending && low_supports_breakpoints ())
df95181f
TBA
4741 need_step_over = find_thread ([this] (thread_info *thread)
4742 {
4743 return thread_needs_step_over (thread);
4744 });
d50171e4 4745
c80825ff 4746 bool leave_all_stopped = (need_step_over != NULL || any_pending);
d50171e4 4747
c058728c
SM
4748 if (need_step_over != NULL)
4749 threads_debug_printf ("Not resuming all, need step over");
4750 else if (any_pending)
4751 threads_debug_printf ("Not resuming, all-stop and found "
4752 "an LWP with pending status");
4753 else
4754 threads_debug_printf ("Resuming, no pending status or step over needed");
d50171e4
PA
4755
4756 /* Even if we're leaving threads stopped, queue all signals we'd
4757 otherwise deliver. */
c80825ff
SM
4758 for_each_thread ([&] (thread_info *thread)
4759 {
df95181f 4760 resume_one_thread (thread, leave_all_stopped);
c80825ff 4761 });
d50171e4
PA
4762
4763 if (need_step_over)
d86d4aaf 4764 start_step_over (get_thread_lwp (need_step_over));
87ce2a04 4765
1bebeeca
PA
4766 /* We may have events that were pending that can/should be sent to
4767 the client now. Trigger a linux_wait call. */
4768 if (target_is_async_p ())
4769 async_file_mark ();
d50171e4
PA
4770}
4771
df95181f
TBA
4772void
4773linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
d50171e4 4774{
d86d4aaf 4775 struct lwp_info *lwp = get_thread_lwp (thread);
d50171e4
PA
4776 int step;
4777
7984d532 4778 if (lwp == except)
e2b44075 4779 return;
d50171e4 4780
c058728c 4781 threads_debug_printf ("lwp %ld", lwpid_of (thread));
d50171e4
PA
4782
4783 if (!lwp->stopped)
4784 {
c058728c 4785 threads_debug_printf (" LWP %ld already running", lwpid_of (thread));
e2b44075 4786 return;
d50171e4
PA
4787 }
4788
02fc4de7 4789 if (thread->last_resume_kind == resume_stop
183be222 4790 && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
d50171e4 4791 {
c058728c
SM
4792 threads_debug_printf (" client wants LWP to remain %ld stopped",
4793 lwpid_of (thread));
e2b44075 4794 return;
d50171e4
PA
4795 }
4796
4797 if (lwp->status_pending_p)
4798 {
c058728c
SM
4799 threads_debug_printf (" LWP %ld has pending status, leaving stopped",
4800 lwpid_of (thread));
e2b44075 4801 return;
d50171e4
PA
4802 }
4803
7984d532
PA
4804 gdb_assert (lwp->suspended >= 0);
4805
d50171e4
PA
4806 if (lwp->suspended)
4807 {
c058728c 4808 threads_debug_printf (" LWP %ld is suspended", lwpid_of (thread));
e2b44075 4809 return;
d50171e4
PA
4810 }
4811
1a981360 4812 if (thread->last_resume_kind == resume_stop
013e3554 4813 && lwp->pending_signals_to_report.empty ()
229d26fc
SM
4814 && (lwp->collecting_fast_tracepoint
4815 == fast_tpoint_collect_result::not_collecting))
02fc4de7
PA
4816 {
4817 /* We haven't reported this LWP as stopped yet (otherwise, the
4818 last_status.kind check above would catch it, and we wouldn't
4819 reach here. This LWP may have been momentarily paused by a
4820 stop_all_lwps call while handling for example, another LWP's
4821 step-over. In that case, the pending expected SIGSTOP signal
4822 that was queued at vCont;t handling time will have already
4823 been consumed by wait_for_sigstop, and so we need to requeue
4824 another one here. Note that if the LWP already has a SIGSTOP
4825 pending, this is a no-op. */
4826
c058728c
SM
4827 threads_debug_printf
4828 ("Client wants LWP %ld to stop. Making sure it has a SIGSTOP pending",
4829 lwpid_of (thread));
02fc4de7
PA
4830
4831 send_sigstop (lwp);
4832 }
4833
863d01bd
PA
4834 if (thread->last_resume_kind == resume_step)
4835 {
c058728c
SM
4836 threads_debug_printf (" stepping LWP %ld, client wants it stepping",
4837 lwpid_of (thread));
8901d193 4838
3b9a79ef 4839 /* If resume_step is requested by GDB, install single-step
8901d193 4840 breakpoints when the thread is about to be actually resumed if
3b9a79ef 4841 the single-step breakpoints weren't removed. */
7582c77c 4842 if (supports_software_single_step ()
3b9a79ef 4843 && !has_single_step_breakpoints (thread))
8901d193
YQ
4844 install_software_single_step_breakpoints (lwp);
4845
4846 step = maybe_hw_step (thread);
863d01bd
PA
4847 }
4848 else if (lwp->bp_reinsert != 0)
4849 {
c058728c
SM
4850 threads_debug_printf (" stepping LWP %ld, reinsert set",
4851 lwpid_of (thread));
f79b145d
YQ
4852
4853 step = maybe_hw_step (thread);
863d01bd
PA
4854 }
4855 else
4856 step = 0;
4857
df95181f 4858 resume_one_lwp (lwp, step, 0, NULL);
7984d532
PA
4859}
4860
df95181f
TBA
4861void
4862linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4863 lwp_info *except)
7984d532 4864{
d86d4aaf 4865 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
4866
4867 if (lwp == except)
e2b44075 4868 return;
7984d532 4869
863d01bd 4870 lwp_suspended_decr (lwp);
7984d532 4871
e2b44075 4872 proceed_one_lwp (thread, except);
d50171e4
PA
4873}
4874
d16f3f6c
TBA
4875void
4876linux_process_target::proceed_all_lwps ()
d50171e4 4877{
d86d4aaf 4878 struct thread_info *need_step_over;
d50171e4
PA
4879
4880 /* If there is a thread which would otherwise be resumed, which is
4881 stopped at a breakpoint that needs stepping over, then don't
4882 resume any threads - have it step over the breakpoint with all
4883 other threads stopped, then resume all threads again. */
4884
bf9ae9d8 4885 if (low_supports_breakpoints ())
d50171e4 4886 {
df95181f
TBA
4887 need_step_over = find_thread ([this] (thread_info *thread)
4888 {
4889 return thread_needs_step_over (thread);
4890 });
d50171e4
PA
4891
4892 if (need_step_over != NULL)
4893 {
c058728c
SM
4894 threads_debug_printf ("found thread %ld needing a step-over",
4895 lwpid_of (need_step_over));
d50171e4 4896
d86d4aaf 4897 start_step_over (get_thread_lwp (need_step_over));
d50171e4
PA
4898 return;
4899 }
4900 }
5544ad89 4901
c058728c 4902 threads_debug_printf ("Proceeding, no step-over needed");
d50171e4 4903
df95181f 4904 for_each_thread ([this] (thread_info *thread)
e2b44075
SM
4905 {
4906 proceed_one_lwp (thread, NULL);
4907 });
d50171e4
PA
4908}
4909
d16f3f6c
TBA
4910void
4911linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
d50171e4 4912{
c058728c
SM
4913 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4914
4915 if (except)
4916 threads_debug_printf ("except=(LWP %ld)",
4917 lwpid_of (get_lwp_thread (except)));
4918 else
4919 threads_debug_printf ("except=nullptr");
5544ad89 4920
7984d532 4921 if (unsuspend)
e2b44075
SM
4922 for_each_thread ([&] (thread_info *thread)
4923 {
4924 unsuspend_and_proceed_one_lwp (thread, except);
4925 });
7984d532 4926 else
e2b44075
SM
4927 for_each_thread ([&] (thread_info *thread)
4928 {
4929 proceed_one_lwp (thread, except);
4930 });
0d62e5e8
DJ
4931}
4932
58caa3dc
DJ
4933
4934#ifdef HAVE_LINUX_REGSETS
4935
1faeff08
MR
4936#define use_linux_regsets 1
4937
030031ee
PA
4938/* Returns true if REGSET has been disabled. */
4939
4940static int
4941regset_disabled (struct regsets_info *info, struct regset_info *regset)
4942{
4943 return (info->disabled_regsets != NULL
4944 && info->disabled_regsets[regset - info->regsets]);
4945}
4946
4947/* Disable REGSET. */
4948
4949static void
4950disable_regset (struct regsets_info *info, struct regset_info *regset)
4951{
4952 int dr_offset;
4953
4954 dr_offset = regset - info->regsets;
4955 if (info->disabled_regsets == NULL)
224c3ddb 4956 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
030031ee
PA
4957 info->disabled_regsets[dr_offset] = 1;
4958}
4959
58caa3dc 4960static int
3aee8918
PA
4961regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4962 struct regcache *regcache)
58caa3dc
DJ
4963{
4964 struct regset_info *regset;
e9d25b98 4965 int saw_general_regs = 0;
95954743 4966 int pid;
1570b33e 4967 struct iovec iov;
58caa3dc 4968
0bfdf32f 4969 pid = lwpid_of (current_thread);
28eef672 4970 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 4971 {
1570b33e
L
4972 void *buf, *data;
4973 int nt_type, res;
58caa3dc 4974
030031ee 4975 if (regset->size == 0 || regset_disabled (regsets_info, regset))
28eef672 4976 continue;
58caa3dc 4977
bca929d3 4978 buf = xmalloc (regset->size);
1570b33e
L
4979
4980 nt_type = regset->nt_type;
4981 if (nt_type)
4982 {
4983 iov.iov_base = buf;
4984 iov.iov_len = regset->size;
4985 data = (void *) &iov;
4986 }
4987 else
4988 data = buf;
4989
dfb64f85 4990#ifndef __sparc__
f15f9948 4991 res = ptrace (regset->get_request, pid,
b8e1b30e 4992 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 4993#else
1570b33e 4994 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 4995#endif
58caa3dc
DJ
4996 if (res < 0)
4997 {
1ef53e6b
AH
4998 if (errno == EIO
4999 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
58caa3dc 5000 {
1ef53e6b
AH
5001 /* If we get EIO on a regset, or an EINVAL and the regset is
5002 optional, do not try it again for this process mode. */
030031ee 5003 disable_regset (regsets_info, regset);
58caa3dc 5004 }
e5a9158d
AA
5005 else if (errno == ENODATA)
5006 {
5007 /* ENODATA may be returned if the regset is currently
5008 not "active". This can happen in normal operation,
5009 so suppress the warning in this case. */
5010 }
fcd4a73d
YQ
5011 else if (errno == ESRCH)
5012 {
5013 /* At this point, ESRCH should mean the process is
5014 already gone, in which case we simply ignore attempts
5015 to read its registers. */
5016 }
58caa3dc
DJ
5017 else
5018 {
0d62e5e8 5019 char s[256];
95954743
PA
5020 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5021 pid);
0d62e5e8 5022 perror (s);
58caa3dc
DJ
5023 }
5024 }
098dbe61
AA
5025 else
5026 {
5027 if (regset->type == GENERAL_REGS)
5028 saw_general_regs = 1;
5029 regset->store_function (regcache, buf);
5030 }
fdeb2a12 5031 free (buf);
58caa3dc 5032 }
e9d25b98
DJ
5033 if (saw_general_regs)
5034 return 0;
5035 else
5036 return 1;
58caa3dc
DJ
5037}
5038
5039static int
3aee8918
PA
5040regsets_store_inferior_registers (struct regsets_info *regsets_info,
5041 struct regcache *regcache)
58caa3dc
DJ
5042{
5043 struct regset_info *regset;
e9d25b98 5044 int saw_general_regs = 0;
95954743 5045 int pid;
1570b33e 5046 struct iovec iov;
58caa3dc 5047
0bfdf32f 5048 pid = lwpid_of (current_thread);
28eef672 5049 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 5050 {
1570b33e
L
5051 void *buf, *data;
5052 int nt_type, res;
58caa3dc 5053
feea5f36
AA
5054 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5055 || regset->fill_function == NULL)
28eef672 5056 continue;
58caa3dc 5057
bca929d3 5058 buf = xmalloc (regset->size);
545587ee
DJ
5059
5060 /* First fill the buffer with the current register set contents,
5061 in case there are any items in the kernel's regset that are
5062 not in gdbserver's regcache. */
1570b33e
L
5063
5064 nt_type = regset->nt_type;
5065 if (nt_type)
5066 {
5067 iov.iov_base = buf;
5068 iov.iov_len = regset->size;
5069 data = (void *) &iov;
5070 }
5071 else
5072 data = buf;
5073
dfb64f85 5074#ifndef __sparc__
f15f9948 5075 res = ptrace (regset->get_request, pid,
b8e1b30e 5076 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5077#else
689cc2ae 5078 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 5079#endif
545587ee
DJ
5080
5081 if (res == 0)
5082 {
5083 /* Then overlay our cached registers on that. */
442ea881 5084 regset->fill_function (regcache, buf);
545587ee
DJ
5085
5086 /* Only now do we write the register set. */
dfb64f85 5087#ifndef __sparc__
f15f9948 5088 res = ptrace (regset->set_request, pid,
b8e1b30e 5089 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5090#else
1570b33e 5091 res = ptrace (regset->set_request, pid, data, nt_type);
dfb64f85 5092#endif
545587ee
DJ
5093 }
5094
58caa3dc
DJ
5095 if (res < 0)
5096 {
1ef53e6b
AH
5097 if (errno == EIO
5098 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
58caa3dc 5099 {
1ef53e6b
AH
5100 /* If we get EIO on a regset, or an EINVAL and the regset is
5101 optional, do not try it again for this process mode. */
030031ee 5102 disable_regset (regsets_info, regset);
58caa3dc 5103 }
3221518c
UW
5104 else if (errno == ESRCH)
5105 {
1b3f6016
PA
5106 /* At this point, ESRCH should mean the process is
5107 already gone, in which case we simply ignore attempts
5108 to change its registers. See also the related
df95181f 5109 comment in resume_one_lwp. */
fdeb2a12 5110 free (buf);
3221518c
UW
5111 return 0;
5112 }
58caa3dc
DJ
5113 else
5114 {
ce3a066d 5115 perror ("Warning: ptrace(regsets_store_inferior_registers)");
58caa3dc
DJ
5116 }
5117 }
e9d25b98
DJ
5118 else if (regset->type == GENERAL_REGS)
5119 saw_general_regs = 1;
09ec9b38 5120 free (buf);
58caa3dc 5121 }
e9d25b98
DJ
5122 if (saw_general_regs)
5123 return 0;
5124 else
5125 return 1;
58caa3dc
DJ
5126}
5127
1faeff08 5128#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5129
1faeff08 5130#define use_linux_regsets 0
3aee8918
PA
5131#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5132#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5133
58caa3dc 5134#endif
1faeff08
MR
5135
5136/* Return 1 if register REGNO is supported by one of the regset ptrace
5137 calls or 0 if it has to be transferred individually. */
5138
5139static int
3aee8918 5140linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5141{
5142 unsigned char mask = 1 << (regno % 8);
5143 size_t index = regno / 8;
5144
5145 return (use_linux_regsets
3aee8918
PA
5146 && (regs_info->regset_bitmap == NULL
5147 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5148}
5149
58caa3dc 5150#ifdef HAVE_LINUX_USRREGS
1faeff08 5151
5b3da067 5152static int
3aee8918 5153register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5154{
5155 int addr;
5156
3aee8918 5157 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5158 error ("Invalid register number %d.", regnum);
5159
3aee8918 5160 addr = usrregs->regmap[regnum];
1faeff08
MR
5161
5162 return addr;
5163}
5164
daca57a7
TBA
5165
5166void
5167linux_process_target::fetch_register (const usrregs_info *usrregs,
5168 regcache *regcache, int regno)
1faeff08
MR
5169{
5170 CORE_ADDR regaddr;
5171 int i, size;
5172 char *buf;
5173 int pid;
5174
3aee8918 5175 if (regno >= usrregs->num_regs)
1faeff08 5176 return;
daca57a7 5177 if (low_cannot_fetch_register (regno))
1faeff08
MR
5178 return;
5179
3aee8918 5180 regaddr = register_addr (usrregs, regno);
1faeff08
MR
5181 if (regaddr == -1)
5182 return;
5183
3aee8918
PA
5184 size = ((register_size (regcache->tdesc, regno)
5185 + sizeof (PTRACE_XFER_TYPE) - 1)
1faeff08 5186 & -sizeof (PTRACE_XFER_TYPE));
224c3ddb 5187 buf = (char *) alloca (size);
1faeff08 5188
0bfdf32f 5189 pid = lwpid_of (current_thread);
1faeff08
MR
5190 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5191 {
5192 errno = 0;
5193 *(PTRACE_XFER_TYPE *) (buf + i) =
5194 ptrace (PTRACE_PEEKUSER, pid,
5195 /* Coerce to a uintptr_t first to avoid potential gcc warning
5196 of coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 5197 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
1faeff08
MR
5198 regaddr += sizeof (PTRACE_XFER_TYPE);
5199 if (errno != 0)
9a70f35c
YQ
5200 {
5201 /* Mark register REGNO unavailable. */
5202 supply_register (regcache, regno, NULL);
5203 return;
5204 }
1faeff08
MR
5205 }
5206
b35db733 5207 low_supply_ptrace_register (regcache, regno, buf);
1faeff08
MR
5208}
5209
daca57a7
TBA
5210void
5211linux_process_target::store_register (const usrregs_info *usrregs,
5212 regcache *regcache, int regno)
1faeff08
MR
5213{
5214 CORE_ADDR regaddr;
5215 int i, size;
5216 char *buf;
5217 int pid;
5218
3aee8918 5219 if (regno >= usrregs->num_regs)
1faeff08 5220 return;
daca57a7 5221 if (low_cannot_store_register (regno))
1faeff08
MR
5222 return;
5223
3aee8918 5224 regaddr = register_addr (usrregs, regno);
1faeff08
MR
5225 if (regaddr == -1)
5226 return;
5227
3aee8918
PA
5228 size = ((register_size (regcache->tdesc, regno)
5229 + sizeof (PTRACE_XFER_TYPE) - 1)
1faeff08 5230 & -sizeof (PTRACE_XFER_TYPE));
224c3ddb 5231 buf = (char *) alloca (size);
1faeff08
MR
5232 memset (buf, 0, size);
5233
b35db733 5234 low_collect_ptrace_register (regcache, regno, buf);
1faeff08 5235
0bfdf32f 5236 pid = lwpid_of (current_thread);
1faeff08
MR
5237 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5238 {
5239 errno = 0;
5240 ptrace (PTRACE_POKEUSER, pid,
5241 /* Coerce to a uintptr_t first to avoid potential gcc warning
5242 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e
LM
5243 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5244 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
1faeff08
MR
5245 if (errno != 0)
5246 {
5247 /* At this point, ESRCH should mean the process is
5248 already gone, in which case we simply ignore attempts
5249 to change its registers. See also the related
df95181f 5250 comment in resume_one_lwp. */
1faeff08
MR
5251 if (errno == ESRCH)
5252 return;
5253
daca57a7
TBA
5254
5255 if (!low_cannot_store_register (regno))
6d91ce9a 5256 error ("writing register %d: %s", regno, safe_strerror (errno));
1faeff08
MR
5257 }
5258 regaddr += sizeof (PTRACE_XFER_TYPE);
5259 }
5260}
daca57a7 5261#endif /* HAVE_LINUX_USRREGS */
1faeff08 5262
b35db733
TBA
5263void
5264linux_process_target::low_collect_ptrace_register (regcache *regcache,
5265 int regno, char *buf)
5266{
5267 collect_register (regcache, regno, buf);
5268}
5269
5270void
5271linux_process_target::low_supply_ptrace_register (regcache *regcache,
5272 int regno, const char *buf)
5273{
5274 supply_register (regcache, regno, buf);
5275}
5276
daca57a7
TBA
5277void
5278linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5279 regcache *regcache,
5280 int regno, int all)
1faeff08 5281{
daca57a7 5282#ifdef HAVE_LINUX_USRREGS
3aee8918
PA
5283 struct usrregs_info *usr = regs_info->usrregs;
5284
1faeff08
MR
5285 if (regno == -1)
5286 {
3aee8918
PA
5287 for (regno = 0; regno < usr->num_regs; regno++)
5288 if (all || !linux_register_in_regsets (regs_info, regno))
5289 fetch_register (usr, regcache, regno);
1faeff08
MR
5290 }
5291 else
3aee8918 5292 fetch_register (usr, regcache, regno);
daca57a7 5293#endif
1faeff08
MR
5294}
5295
daca57a7
TBA
5296void
5297linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5298 regcache *regcache,
5299 int regno, int all)
1faeff08 5300{
daca57a7 5301#ifdef HAVE_LINUX_USRREGS
3aee8918
PA
5302 struct usrregs_info *usr = regs_info->usrregs;
5303
1faeff08
MR
5304 if (regno == -1)
5305 {
3aee8918
PA
5306 for (regno = 0; regno < usr->num_regs; regno++)
5307 if (all || !linux_register_in_regsets (regs_info, regno))
5308 store_register (usr, regcache, regno);
1faeff08
MR
5309 }
5310 else
3aee8918 5311 store_register (usr, regcache, regno);
58caa3dc 5312#endif
daca57a7 5313}
1faeff08 5314
a5a4d4cd
TBA
5315void
5316linux_process_target::fetch_registers (regcache *regcache, int regno)
1faeff08
MR
5317{
5318 int use_regsets;
5319 int all = 0;
aa8d21c9 5320 const regs_info *regs_info = get_regs_info ();
1faeff08
MR
5321
5322 if (regno == -1)
5323 {
bd70b1f2 5324 if (regs_info->usrregs != NULL)
3aee8918 5325 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
bd70b1f2 5326 low_fetch_register (regcache, regno);
c14dfd32 5327
3aee8918
PA
5328 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5329 if (regs_info->usrregs != NULL)
5330 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
1faeff08
MR
5331 }
5332 else
5333 {
bd70b1f2 5334 if (low_fetch_register (regcache, regno))
c14dfd32
PA
5335 return;
5336
3aee8918 5337 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5338 if (use_regsets)
3aee8918
PA
5339 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5340 regcache);
5341 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5342 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5343 }
58caa3dc
DJ
5344}
5345
a5a4d4cd
TBA
5346void
5347linux_process_target::store_registers (regcache *regcache, int regno)
58caa3dc 5348{
1faeff08
MR
5349 int use_regsets;
5350 int all = 0;
aa8d21c9 5351 const regs_info *regs_info = get_regs_info ();
1faeff08
MR
5352
5353 if (regno == -1)
5354 {
3aee8918
PA
5355 all = regsets_store_inferior_registers (regs_info->regsets_info,
5356 regcache);
5357 if (regs_info->usrregs != NULL)
5358 usr_store_inferior_registers (regs_info, regcache, regno, all);
1faeff08
MR
5359 }
5360 else
5361 {
3aee8918 5362 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5363 if (use_regsets)
3aee8918
PA
5364 all = regsets_store_inferior_registers (regs_info->regsets_info,
5365 regcache);
5366 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5367 usr_store_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5368 }
58caa3dc
DJ
5369}
5370
bd70b1f2
TBA
5371bool
5372linux_process_target::low_fetch_register (regcache *regcache, int regno)
5373{
5374 return false;
5375}
da6d8c04 5376
e2558df3 5377/* A wrapper for the read_memory target op. */
da6d8c04 5378
c3e735a6 5379static int
f450004a 5380linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
e2558df3 5381{
52405d85 5382 return the_target->read_memory (memaddr, myaddr, len);
e2558df3
TBA
5383}
5384
e2558df3 5385
421490af
PA
5386/* Helper for read_memory/write_memory using /proc/PID/mem. Because
5387 we can use a single read/write call, this can be much more
5388 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
5389 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running threads.
5390 One an only one of READBUF and WRITEBUF is non-null. If READBUF is
5391 not null, then we're reading, otherwise we're writing. */
5392
5393static int
5394proc_xfer_memory (CORE_ADDR memaddr, unsigned char *readbuf,
5395 const gdb_byte *writebuf, int len)
da6d8c04 5396{
421490af 5397 gdb_assert ((readbuf == nullptr) != (writebuf == nullptr));
fd462a61 5398
421490af
PA
5399 process_info *proc = current_process ();
5400
5401 int fd = proc->priv->mem_fd;
5402 if (fd == -1)
5403 return EIO;
5404
5405 while (len > 0)
fd462a61 5406 {
4934b29e
MR
5407 int bytes;
5408
31a56a22
PA
5409 /* Use pread64/pwrite64 if available, since they save a syscall
5410 and can handle 64-bit offsets even on 32-bit platforms (for
5411 instance, SPARC debugging a SPARC64 application). But only
5412 use them if the offset isn't so high that when cast to off_t
5413 it'd be negative, as seen on SPARC64. pread64/pwrite64
5414 outright reject such offsets. lseek does not. */
fd462a61 5415#ifdef HAVE_PREAD64
31a56a22 5416 if ((off_t) memaddr >= 0)
421490af 5417 bytes = (readbuf != nullptr
31a56a22
PA
5418 ? pread64 (fd, readbuf, len, memaddr)
5419 : pwrite64 (fd, writebuf, len, memaddr));
5420 else
fd462a61 5421#endif
31a56a22
PA
5422 {
5423 bytes = -1;
5424 if (lseek (fd, memaddr, SEEK_SET) != -1)
5425 bytes = (readbuf != nullptr
5426 ? read (fd, readbuf, len)
5427 : write (fd, writebuf, len));
5428 }
fd462a61 5429
421490af
PA
5430 if (bytes < 0)
5431 return errno;
5432 else if (bytes == 0)
4934b29e 5433 {
421490af
PA
5434 /* EOF means the address space is gone, the whole process
5435 exited or execed. */
5436 return EIO;
4934b29e 5437 }
da6d8c04 5438
421490af
PA
5439 memaddr += bytes;
5440 if (readbuf != nullptr)
5441 readbuf += bytes;
5442 else
5443 writebuf += bytes;
5444 len -= bytes;
da6d8c04
DJ
5445 }
5446
421490af
PA
5447 return 0;
5448}
c3e735a6 5449
421490af
PA
5450int
5451linux_process_target::read_memory (CORE_ADDR memaddr,
5452 unsigned char *myaddr, int len)
5453{
5454 return proc_xfer_memory (memaddr, myaddr, nullptr, len);
da6d8c04
DJ
5455}
5456
93ae6fdc
PA
5457/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5458 memory at MEMADDR. On failure (cannot write to the inferior)
f0ae6fc3 5459 returns the value of errno. Always succeeds if LEN is zero. */
da6d8c04 5460
e2558df3
TBA
5461int
5462linux_process_target::write_memory (CORE_ADDR memaddr,
5463 const unsigned char *myaddr, int len)
da6d8c04 5464{
0d62e5e8
DJ
5465 if (debug_threads)
5466 {
58d6951d 5467 /* Dump up to four bytes. */
bf47e248
PA
5468 char str[4 * 2 + 1];
5469 char *p = str;
5470 int dump = len < 4 ? len : 4;
5471
421490af 5472 for (int i = 0; i < dump; i++)
bf47e248
PA
5473 {
5474 sprintf (p, "%02x", myaddr[i]);
5475 p += 2;
5476 }
5477 *p = '\0';
5478
c058728c 5479 threads_debug_printf ("Writing %s to 0x%08lx in process %d",
421490af 5480 str, (long) memaddr, current_process ()->pid);
0d62e5e8
DJ
5481 }
5482
421490af 5483 return proc_xfer_memory (memaddr, nullptr, myaddr, len);
da6d8c04 5484}
2f2893d9 5485
2a31c7aa
TBA
/* Called when symbols become available; give libthread_db a chance to
   initialize for the current process, once.  No-op when thread_db
   support is compiled out.  */

void
linux_process_target::look_up_symbols ()
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process; nothing to do.  */
  if (proc->priv->thread_db != NULL)
    return;

  thread_db_init ();
#endif
}
5498
eb497a2a
TBA
/* Interrupt the inferior, as if the user typed ^C on its controlling
   terminal.  */

void
linux_process_target::request_interrupt ()
{
  /* Send a SIGINT to the process group.  This acts just like the user
     typed a ^C on the controlling terminal.  */
  int res = ::kill (-signal_pid, SIGINT);
  if (res == -1)
    warning (_("Sending SIGINT to process group of pid %ld failed: %s"),
	     signal_pid, safe_strerror (errno));
}
5509
eac215cc
TBA
/* Linux always exposes the auxiliary vector via /proc.  */

bool
linux_process_target::supports_read_auxv ()
{
  return true;
}
5515
aa691b87
RM
5516/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5517 to debugger memory starting at MYADDR. */
5518
eac215cc 5519int
43e5fbd8
TJB
5520linux_process_target::read_auxv (int pid, CORE_ADDR offset,
5521 unsigned char *myaddr, unsigned int len)
aa691b87
RM
5522{
5523 char filename[PATH_MAX];
5524 int fd, n;
5525
6cebaf6e 5526 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5527
5528 fd = open (filename, O_RDONLY);
5529 if (fd < 0)
5530 return -1;
5531
5532 if (offset != (CORE_ADDR) 0
5533 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5534 n = -1;
5535 else
5536 n = read (fd, myaddr, len);
5537
5538 close (fd);
5539
5540 return n;
5541}
5542
7e0bde70
TBA
/* Insert a breakpoint/watchpoint of TYPE at ADDR with length SIZE.
   Software breakpoints are handled generically; everything else is
   delegated to the architecture hook.  Returns 0 on success, 1 if
   unsupported, or an errno-style error.  */

int
linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
				    int size, raw_breakpoint *bp)
{
  if (type == raw_bkpt_type_sw)
    return insert_memory_breakpoint (bp);
  else
    return low_insert_point (type, addr, size, bp);
}

/* Default architecture hook for non-software points.  */

int
linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}
5560
7e0bde70
TBA
/* Remove a breakpoint/watchpoint of TYPE at ADDR with length SIZE —
   the mirror of insert_point above.  */

int
linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
				    int size, raw_breakpoint *bp)
{
  if (type == raw_bkpt_type_sw)
    return remove_memory_breakpoint (bp);
  else
    return low_remove_point (type, addr, size, bp);
}

/* Default architecture hook for non-software points.  */

int
linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}
5578
/* Implement the stopped_by_sw_breakpoint target_ops
   method.  True iff the current LWP's last stop was caused by a
   software breakpoint.  */

bool
linux_process_target::stopped_by_sw_breakpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
}

/* Implement the supports_stopped_by_sw_breakpoint target_ops
   method.  Only meaningful when the kernel reports the stop reason
   via siginfo (USE_SIGTRAP_SIGINFO).  */

bool
linux_process_target::supports_stopped_by_sw_breakpoint ()
{
  return USE_SIGTRAP_SIGINFO;
}
5598
/* Implement the stopped_by_hw_breakpoint target_ops
   method.  True iff the current LWP's last stop was caused by a
   hardware breakpoint.  */

bool
linux_process_target::stopped_by_hw_breakpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
}

/* Implement the supports_stopped_by_hw_breakpoint target_ops
   method.  Like the software variant, requires siginfo-based stop
   reporting.  */

bool
linux_process_target::supports_stopped_by_hw_breakpoint ()
{
  return USE_SIGTRAP_SIGINFO;
}
5618
/* Implement the supports_hardware_single_step target_ops method.
   PTRACE_SINGLESTEP is always available on Linux.  */

bool
linux_process_target::supports_hardware_single_step ()
{
  return true;
}
5626
6eeb5c55
TBA
/* True iff the current LWP's last stop was caused by a watchpoint.  */

bool
linux_process_target::stopped_by_watchpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}

/* Return the data address that triggered the watchpoint stop.  Only
   meaningful when stopped_by_watchpoint above returned true.  */

CORE_ADDR
linux_process_target::stopped_data_address ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stopped_data_address;
}
5642
db0dfaa0
LM
/* This is only used for targets that define PT_TEXT_ADDR,
   PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
   the target has different ways of acquiring this information, like
   loadmaps.  */

bool
linux_process_target::supports_read_offsets ()
{
#ifdef SUPPORTS_READ_OFFSETS
  return true;
#else
  return false;
#endif
}

/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Returns 1 on success (outputs in *TEXT_P and
   *DATA_P), 0 on failure.  */

int
linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#ifdef SUPPORTS_READ_OFFSETS
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  /* PTRACE_PEEKUSER returns the value in-band, so errors can only be
     detected via errno; clear it first and check it after all three
     peeks.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
#else
  gdb_assert_not_reached ("target op read_offsets not supported");
#endif
}
52fb6437 5698
6e3fd7e9
TBA
/* TLS address lookup is only available via libthread_db.  */

bool
linux_process_target::supports_get_tls_address ()
{
#ifdef USE_THREAD_DB
  return true;
#else
  return false;
#endif
}

/* Resolve the address of a thread-local variable: OFFSET within the
   TLS block of LOAD_MODULE for THREAD.  Returns 0 on success with the
   result in *ADDRESS, nonzero otherwise.  */

int
linux_process_target::get_tls_address (thread_info *thread,
				       CORE_ADDR offset,
				       CORE_ADDR load_module,
				       CORE_ADDR *address)
{
#ifdef USE_THREAD_DB
  return thread_db_get_tls_address (thread, offset, load_module, address);
#else
  return -1;
#endif
}
5721
2d0795ee
TBA
/* qXfer:osdata is supported via the shared linux-osdata code.  */

bool
linux_process_target::supports_qxfer_osdata ()
{
  return true;
}

/* Handle qXfer:osdata:read.  Note this transfer is read-only; WRITEBUF
   is ignored by the common implementation.  */

int
linux_process_target::qxfer_osdata (const char *annex,
				    unsigned char *readbuf,
				    unsigned const char *writebuf,
				    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
5736
cb63de7c
TBA
5737void
5738linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5739 gdb_byte *inf_siginfo, int direction)
d0722149 5740{
cb63de7c 5741 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
d0722149
DE
5742
5743 /* If there was no callback, or the callback didn't do anything,
5744 then just do a straight memcpy. */
5745 if (!done)
5746 {
5747 if (direction == 1)
a5362b9a 5748 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 5749 else
a5362b9a 5750 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
5751 }
5752}
5753
cb63de7c
TBA
/* Default low-target hook: no architecture-specific siginfo layout
   conversion; tell the caller to fall back to a straight copy.  */

bool
linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
					 int direction)
{
  return false;
}

/* qXfer:siginfo is always available under ptrace.  */

bool
linux_process_target::supports_qxfer_siginfo ()
{
  return true;
}
5766
/* Handle qXfer:siginfo:read / qXfer:siginfo:write for the current
   thread: transfer up to LEN bytes at OFFSET of the (inferior-layout)
   siginfo.  Returns the number of bytes transferred, or -1 on error
   (no current thread, offset out of range, or ptrace failure).  */

int
linux_process_target::qxfer_siginfo (const char *annex,
				     unsigned char *readbuf,
				     unsigned const char *writebuf,
				     CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  threads_debug_printf ("%s siginfo for lwp %d.",
			readbuf != NULL ? "Reading" : "Writing",
			pid);

  if (offset >= sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the structure.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      /* Partial writes patch the fetched copy, so unmodified bytes are
	 preserved when flushing back.  */
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
5816
bd99dc85
PA
5817/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5818 so we notice when children change state; as the handler for the
5819 sigsuspend in my_waitpid. */
5820
5821static void
5822sigchld_handler (int signo)
5823{
5824 int old_errno = errno;
5825
5826 if (debug_threads)
e581f2b4
PA
5827 {
5828 do
5829 {
a7e559cc
AH
5830 /* Use the async signal safe debug function. */
5831 if (debug_write ("sigchld_handler\n",
5832 sizeof ("sigchld_handler\n") - 1) < 0)
e581f2b4
PA
5833 break; /* just ignore */
5834 } while (0);
5835 }
bd99dc85
PA
5836
5837 if (target_is_async_p ())
5838 async_file_mark (); /* trigger a linux_wait */
5839
5840 errno = old_errno;
5841}
5842
0dc587d4
TBA
/* Non-stop mode is supported on Linux.  */

bool
linux_process_target::supports_non_stop ()
{
  return true;
}
5848
0dc587d4
TBA
/* Enable or disable async (event-loop driven) operation.  Returns the
   previous state.  SIGCHLD is blocked around the transition so
   sigchld_handler (which marks the event pipe) cannot run while the
   pipe and the event-loop registration are being changed.  */

bool
linux_process_target::async (bool enable)
{
  bool previous = target_is_async_p ();

  threads_debug_printf ("async (%d), previous=%d",
			enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      gdb_sigmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (!linux_event_pipe.open_pipe ())
	    {
	      /* Restore the signal mask before bailing out.  */
	      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe.event_fd (),
			    handle_target_event, NULL,
			    "linux-low");

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe.event_fd ());

	  linux_event_pipe.close_pipe ();
	}

      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
5895
0dc587d4
TBA
/* Switch non-stop mode on or off.  Returns 0 on success, -1 if the
   async state could not be brought in line with the request.  */

int
linux_process_target::start_non_stop (bool nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  target_async (nonstop);

  /* Verify the transition actually took effect.  */
  if (target_is_async_p () != (nonstop != false))
    return -1;

  return 0;
}
5907
652aef77
TBA
/* Debugging multiple processes at once is supported.  */

bool
linux_process_target::supports_multi_process ()
{
  return true;
}

/* Check if fork events are supported.  */

bool
linux_process_target::supports_fork_events ()
{
  return true;
}

/* Check if vfork events are supported.  */

bool
linux_process_target::supports_vfork_events ()
{
  return true;
}
5929
393a6b59
PA
/* Return the set of supported thread options.  */

gdb_thread_options
linux_process_target::supported_thread_options ()
{
  return GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
}
5937
94585166
DB
/* Check if exec events are supported.  */

bool
linux_process_target::supports_exec_events ()
{
  return true;
}
5945
de0d863e
DB
/* Target hook for 'handle_new_gdb_connection'.  Causes a reset of the
   ptrace flags for all inferiors.  This is in case the new GDB connection
   doesn't support the same set of events that the previous one did.  */

void
linux_process_target::handle_new_gdb_connection ()
{
  /* Request that all the lwps reset their ptrace options.  */
  for_each_thread ([] (thread_info *thread)
    {
      struct lwp_info *lwp = get_thread_lwp (thread);

      if (!lwp->stopped)
	{
	  /* Stop the lwp so we can modify its ptrace options.  The
	     flags are applied later, when the stop is processed.  */
	  lwp->must_set_ptrace_flags = 1;
	  linux_stop_lwp (lwp);
	}
      else
	{
	  /* Already stopped; go ahead and set the ptrace options.  */
	  struct process_info *proc = find_process_pid (pid_of (thread));
	  int options = linux_low_ptrace_options (proc->attached);

	  linux_enable_event_reporting (lwpid_of (thread), options);
	  lwp->must_set_ptrace_flags = 0;
	}
    });
}
5975
55cf3021
TBA
/* Handle a 'monitor' command from GDB; only libthread_db has any
   Linux-specific monitor commands.  Returns nonzero if handled.  */

int
linux_process_target::handle_monitor_command (char *mon)
{
#ifdef USE_THREAD_DB
  return thread_db_handle_monitor_command (mon);
#else
  return 0;
#endif
}

/* Return the CPU core PTID last ran on, via /proc.  */

int
linux_process_target::core_of_thread (ptid_t ptid)
{
  return linux_common_core_of_thread (ptid);
}
5991
c756403b
TBA
/* Address-space randomization can be disabled (via personality).  */

bool
linux_process_target::supports_disable_randomization ()
{
  return true;
}

/* The in-process agent is supported.  */

bool
linux_process_target::supports_agent ()
{
  return true;
}
6003
2526e0cd
TBA
/* Range stepping is available when single-stepping is emulated in
   software, or when the architecture hook reports support.  */

bool
linux_process_target::supports_range_stepping ()
{
  if (supports_software_single_step ())
    return true;

  return low_supports_range_stepping ();
}

/* Default low-target hook: no hardware range stepping.  */

bool
linux_process_target::low_supports_range_stepping ()
{
  return false;
}
6018
8247b823
TBA
/* The executable path can be recovered from /proc/PID/exe.  */

bool
linux_process_target::supports_pid_to_exec_file ()
{
  return true;
}

/* Return the filename of PID's executable, via /proc.  */

const char *
linux_process_target::pid_to_exec_file (int pid)
{
  return linux_proc_pid_to_exec_file (pid);
}
6030
c9b7b804
TBA
/* Filesystem operations in the inferior's mount namespace are
   supported.  */

bool
linux_process_target::supports_multifs ()
{
  return true;
}

/* open(2) in PID's mount namespace (close-on-exec).  */

int
linux_process_target::multifs_open (int pid, const char *filename,
				    int flags, mode_t mode)
{
  return linux_mntns_open_cloexec (pid, filename, flags, mode);
}

/* unlink(2) in PID's mount namespace.  */

int
linux_process_target::multifs_unlink (int pid, const char *filename)
{
  return linux_mntns_unlink (pid, filename);
}

/* readlink(2) in PID's mount namespace.  */

ssize_t
linux_process_target::multifs_readlink (int pid, const char *filename,
					char *buf, size_t bufsiz)
{
  return linux_mntns_readlink (pid, filename, buf, bufsiz);
}
6056
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
/* Load-map support for no-MMU FDPIC/DSBT targets, where segments are
   placed independently and GDB needs the runtime map.  */
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PT_GETDSBT
# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PTRACE_GETFDPIC
# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
# endif

bool
linux_process_target::supports_read_loadmap ()
{
  return true;
}

/* Read up to LEN bytes at OFFSET of the load map for ANNEX ("exec" or
   "interp") into MYADDR.  Returns the number of bytes copied, or -1
   on error.  */

int
linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
				    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  /* The kernel fills in a pointer to its own copy of the map.  */
  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  /* NOTE(review): OFFSET is a CORE_ADDR (unsigned), so the "< 0" arm
     can never fire — the range check reduces to the upper bound.  */
  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6138
bc8d3ae4
TBA
/* Syscall catchpoints are supported iff the architecture hook says
   so.  */

bool
linux_process_target::supports_catch_syscall ()
{
  return low_supports_catch_syscall ();
}

/* Default low-target hook: no syscall catchpoint support.  */

bool
linux_process_target::low_supports_catch_syscall ()
{
  return false;
}
6150
770d8f6a
TBA
/* Fetch the PC from REGCACHE via the low target; returns 0 when the
   low target provides no breakpoint/PC support.  */

CORE_ADDR
linux_process_target::read_pc (regcache *regcache)
{
  if (!low_supports_breakpoints ())
    return 0;

  return low_get_pc (regcache);
}

/* Store PC into REGCACHE via the low target; only valid when the low
   target supports breakpoints.  */

void
linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (low_supports_breakpoints ());

  low_set_pc (regcache, pc);
}
6167
68119632
TBA
/* The thread_stopped target op below is implemented.  */

bool
linux_process_target::supports_thread_stopped ()
{
  return true;
}

/* True iff THREAD's LWP is currently stopped.  */

bool
linux_process_target::thread_stopped (thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}
6179
/* This exposes stop-all-threads functionality to other modules.  */

void
linux_process_target::pause_all (bool freeze)
{
  stop_all_lwps (freeze, NULL);
}

/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  */

void
linux_process_target::unpause_all (bool unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}
6196
2268b414
JK
/* Extract &phdr and num_phdr in the inferior.  Return 0 on success.
   Scans /proc/PID/auxv for AT_PHDR and AT_PHNUM, using the auxv entry
   size that matches the inferior's ELF class (IS_ELF64).  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  *phdr_memaddr = 0;
  *num_phdr = 0;
  /* Stop as soon as both values have been found, or on short read.  */
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
6262
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.
   Locates the program headers through the auxv, computes the load
   relocation from PT_PHDR, then returns the relocated PT_DYNAMIC
   address.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  /* No PT_DYNAMIC segment found.  */
  return 0;
}
6336
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.

   Walks the inferior's dynamic section entry by entry; the 64-bit and
   32-bit arms below are structural mirrors differing only in ELF
   types.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  /* DT_MIPS_RLD_MAP holds a pointer to the map address.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  /* DT_MIPS_RLD_MAP_REL is relative to the dynamic entry.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  /* Remember DT_DEBUG but keep scanning for the MIPS tags.  */
	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
6438
/* Read one pointer from MEMADDR in the inferior.  PTR_SIZE is the
   inferior's pointer width in bytes.  Returns 0 on success (result in
   *PTR), nonzero on read failure.  */

static int
read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
{
  int ret;

  /* Go through a union so this works on either big or little endian
     hosts, when the inferior's pointer size is smaller than the size
     of CORE_ADDR.  It is assumed the inferior's endianness is the
     same of the superior's.  */
  union
  {
    CORE_ADDR core_addr;
    unsigned int ui;
    unsigned char uc;
  } addr;

  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
  if (ret == 0)
    {
      if (ptr_size == sizeof (CORE_ADDR))
	*ptr = addr.core_addr;
      else if (ptr_size == sizeof (unsigned int))
	*ptr = addr.ui;
      else
	gdb_assert_not_reached ("unhandled pointer size");
    }
  return ret;
}
6469
974387bb
TBA
/* qXfer:libraries-svr4 is supported.  */

bool
linux_process_target::supports_qxfer_libraries_svr4 ()
{
  return true;
}
6475
2268b414
JK
/* Field offsets within the inferior's r_debug / link_map structures,
   for the 32-bit and 64-bit SVR4 dynamic-linker layouts.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset of r_debug_extended.r_next.  */
    int r_next_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };

static const link_map_offsets lmo_32bit_offsets =
  {
    0,	/* r_version offset.  */
    4,	/* r_debug.r_map offset.  */
    20,	/* r_debug_extended.r_next.  */
    0,	/* l_addr offset in link_map.  */
    4,	/* l_name offset in link_map.  */
    8,	/* l_ld offset in link_map.  */
    12,	/* l_next offset in link_map.  */
    16	/* l_prev offset in link_map.  */
  };

static const link_map_offsets lmo_64bit_offsets =
  {
    0,	/* r_version offset.  */
    8,	/* r_debug.r_map offset.  */
    40,	/* r_debug_extended.r_next.  */
    0,	/* l_addr offset in link_map.  */
    8,	/* l_name offset in link_map.  */
    16,	/* l_ld offset in link_map.  */
    24,	/* l_next offset in link_map.  */
    32	/* l_prev offset in link_map.  */
  };
6526
/* Get the loaded shared libraries from one namespace.

   Walks the link_map chain starting at LM_ADDR (whose expected
   predecessor is LM_PREV), appending one <library .../> XML element
   per named entry to DOCUMENT.  LMID identifies the namespace and is
   emitted as an attribute.  PTR_SIZE is the inferior pointer width;
   LMO gives the structure field offsets.  */

static void
read_link_map (std::string &document, CORE_ADDR lmid, CORE_ADDR lm_addr,
	       CORE_ADDR lm_prev, int ptr_size, const link_map_offsets *lmo)
{
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;

  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      /* A back-pointer mismatch means the list is corrupt; stop rather
	 than loop or emit garbage.  */
      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%s != 0x%s",
		   paddress (lm_prev), paddress (l_prev));
	  break;
	}

      /* Not checking for error because reading may stop before we've got
	 PATH_MAX worth of characters.  */
      libname[0] = '\0';
      linux_read_memory (l_name, libname, sizeof (libname) - 1);
      libname[sizeof (libname) - 1] = '\0';
      if (libname[0] != '\0')
	{
	  string_appendf (document, "<library name=\"");
	  xml_escape_text_append (document, (char *) libname);
	  string_appendf (document, "\" lm=\"0x%s\" l_addr=\"0x%s\" "
			  "l_ld=\"0x%s\" lmid=\"0x%s\"/>",
			  paddress (lm_addr), paddress (l_addr),
			  paddress (l_ld), paddress (lmid));
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }
}
6575
/* Construct qXfer:libraries-svr4:read reply.

   ANNEX may carry "name=hexaddr;" pairs for "lmid", "start" and "prev";
   unknown names are skipped.  When "start" is given, only that one
   namespace's list is walked; otherwise all namespaces reachable from
   r_debug are enumerated.  Returns the number of bytes copied into
   READBUF, -1 on error, or -2 when a write is attempted (unsupported).  */

int
linux_process_target::qxfer_libraries_svr4 (const char *annex,
					    unsigned char *readbuf,
					    unsigned const char *writebuf,
					    CORE_ADDR offset, int len)
{
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;
  unsigned int machine;
  CORE_ADDR lmid = 0, lm_addr = 0, lm_prev = 0;

  /* This object is read-only.  */
  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Probe the inferior's ELF class to pick the right link_map layout
     and pointer size.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  const link_map_offsets *lmo;
  int ptr_size;
  if (is_elf64)
    {
      lmo = &lmo_64bit_offsets;
      ptr_size = 8;
    }
  else
    {
      lmo = &lmo_32bit_offsets;
      ptr_size = 4;
    }

  /* Parse the "name=value;" pairs in the annex.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int name_len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      name_len = sep - annex;
      if (name_len == 4 && startswith (annex, "lmid"))
	addrp = &lmid;
      else if (name_len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (name_len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  /* Unknown name - skip to the next pair, if any.  */
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  /* Note the header is left unterminated here; ">" (possibly preceded
     by a main-lm attribute) is appended below once we know it.  */
  std::string document = "<library-list-svr4 version=\"1.0\"";

  /* When the starting LM_ADDR is passed in the annex, only traverse that
     namespace, which is assumed to be identified by LMID.

     Otherwise, start with R_DEBUG and traverse all namespaces we find.  */
  if (lm_addr != 0)
    {
      document += ">";
      read_link_map (document, lmid, lm_addr, lm_prev, ptr_size, lmo);
    }
  else
    {
      if (lm_prev != 0)
	warning ("ignoring prev=0x%s without start", paddress (lm_prev));

      /* We could interpret LMID as 'provide only the libraries for this
	 namespace' but GDB is currently only providing lmid, start, and
	 prev, or nothing.  */
      if (lmid != 0)
	warning ("ignoring lmid=0x%s without start", paddress (lmid));

      /* Cache r_debug in the per-process private data.  */
      CORE_ADDR r_debug = priv->r_debug;
      if (r_debug == 0)
	r_debug = priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (r_debug == (CORE_ADDR) -1)
	return -1;

      /* Terminate the header if we end up with an empty list.  */
      if (r_debug == 0)
	document += ">";

      /* Walk the chain of r_debug structures, one per namespace.  */
      while (r_debug != 0)
	{
	  int r_version = 0;
	  if (linux_read_memory (r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0)
	    {
	      warning ("unable to read r_version from 0x%s",
		       paddress (r_debug + lmo->r_version_offset));
	      break;
	    }

	  if (r_version < 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	      break;
	    }

	  if (read_one_ptr (r_debug + lmo->r_map_offset, &lm_addr,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%s",
		       paddress (r_debug + lmo->r_map_offset));
	      break;
	    }

	  /* We read the entire namespace.  */
	  lm_prev = 0;

	  /* The first entry corresponds to the main executable unless the
	     dynamic loader was loaded late by a static executable.  But
	     in such case the main executable does not have PT_DYNAMIC
	     present and we would not have gotten here.  */
	  if (r_debug == priv->r_debug)
	    {
	      if (lm_addr != 0)
		string_appendf (document, " main-lm=\"0x%s\">",
				paddress (lm_addr));
	      else
		document += ">";

	      /* Skip the main executable's entry itself.  */
	      lm_prev = lm_addr;
	      if (read_one_ptr (lm_addr + lmo->l_next_offset,
				&lm_addr, ptr_size) != 0)
		{
		  warning ("unable to read l_next from 0x%s",
			   paddress (lm_addr + lmo->l_next_offset));
		  break;
		}
	    }

	  read_link_map (document, r_debug, lm_addr, lm_prev, ptr_size, lmo);

	  /* r_next only exists from version 2 on.  */
	  if (r_version < 2)
	    break;

	  if (read_one_ptr (r_debug + lmo->r_next_offset, &r_debug,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_next from 0x%s",
		       paddress (r_debug + lmo->r_next_offset));
	      break;
	    }
	}
    }

  document += "</library-list-svr4>";

  /* Serve the requested [OFFSET, OFFSET+LEN) window of the document.  */
  int document_len = document.length ();
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document.data () + offset, len);

  return len;
}
6756
9accd112
MM
6757#ifdef HAVE_LINUX_BTRACE
6758
8263b346
TBA
bool
linux_process_target::supports_btrace ()
{
  /* Branch tracing support is compiled in (HAVE_LINUX_BTRACE); whether
     the running kernel/CPU actually provide it is determined when
     tracing is enabled.  */
  return true;
}
6764
btrace_target_info *
linux_process_target::enable_btrace (thread_info *tp,
				     const btrace_config *conf)
{
  /* Delegate to the common Linux btrace layer, identifying the thread
     by its ptid.  */
  return linux_enable_btrace (tp->id, conf);
}
6771
969c39fb 6772/* See to_disable_btrace target method. */
9accd112 6773
79597bdd
TBA
6774int
6775linux_process_target::disable_btrace (btrace_target_info *tinfo)
969c39fb
MM
6776{
6777 enum btrace_error err;
6778
6779 err = linux_disable_btrace (tinfo);
6780 return (err == BTRACE_ERR_NONE ? 0 : -1);
6781}
6782
bc504a31 6783/* Encode an Intel Processor Trace configuration. */
b20a6524
MM
6784
6785static void
873a185b 6786linux_low_encode_pt_config (std::string *buffer,
b20a6524
MM
6787 const struct btrace_data_pt_config *config)
6788{
873a185b 6789 *buffer += "<pt-config>\n";
b20a6524
MM
6790
6791 switch (config->cpu.vendor)
6792 {
6793 case CV_INTEL:
873a185b
TT
6794 string_xml_appendf (*buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6795 "model=\"%u\" stepping=\"%u\"/>\n",
6796 config->cpu.family, config->cpu.model,
6797 config->cpu.stepping);
b20a6524
MM
6798 break;
6799
6800 default:
6801 break;
6802 }
6803
873a185b 6804 *buffer += "</pt-config>\n";
b20a6524
MM
6805}
6806
6807/* Encode a raw buffer. */
6808
6809static void
873a185b 6810linux_low_encode_raw (std::string *buffer, const gdb_byte *data,
b20a6524
MM
6811 unsigned int size)
6812{
6813 if (size == 0)
6814 return;
6815
268a13a5 6816 /* We use hex encoding - see gdbsupport/rsp-low.h. */
873a185b 6817 *buffer += "<raw>\n";
b20a6524
MM
6818
6819 while (size-- > 0)
6820 {
6821 char elem[2];
6822
6823 elem[0] = tohex ((*data >> 4) & 0xf);
6824 elem[1] = tohex (*data++ & 0xf);
6825
8b2d5ef8 6826 buffer->append (elem, 2);
b20a6524
MM
6827 }
6828
873a185b 6829 *buffer += "</raw>\n";
b20a6524
MM
6830}
6831
/* See to_read_btrace target method.

   Read branch trace data of kind TYPE for TINFO's thread and render it
   into *BUFFER as XML (or an "E." error string).  Returns 0 on success,
   -1 on failure.  */

int
linux_process_target::read_btrace (btrace_target_info *tinfo,
				   std::string *buffer,
				   enum btrace_read_type type)
{
  struct btrace_data btrace;
  enum btrace_error err;

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      /* Report the failure to GDB as an "E." text reply.  */
      if (err == BTRACE_ERR_OVERFLOW)
	*buffer += "E.Overflow.";
      else
	*buffer += "E.Generic Error.";

      return -1;
    }

  /* Render the trace according to its format.  */
  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      *buffer += "E.No Trace.";
      return -1;

    case BTRACE_FORMAT_BTS:
      /* BTS: a flat list of branch blocks.  */
      *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
      *buffer += "<btrace version=\"1.0\">\n";

      for (const btrace_block &block : *btrace.variant.bts.blocks)
	string_xml_appendf (*buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			    paddress (block.begin), paddress (block.end));

      *buffer += "</btrace>\n";
      break;

    case BTRACE_FORMAT_PT:
      /* Intel PT: the CPU configuration followed by the raw trace.  */
      *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
      *buffer += "<btrace version=\"1.0\">\n";
      *buffer += "<pt>\n";

      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      *buffer += "</pt>\n";
      *buffer += "</btrace>\n";
      break;

    default:
      *buffer += "E.Unsupported Trace Format.";
      return -1;
    }

  return 0;
}
f4abbc16
MM
6891
6892/* See to_btrace_conf target method. */
6893
79597bdd
TBA
6894int
6895linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
873a185b 6896 std::string *buffer)
f4abbc16
MM
6897{
6898 const struct btrace_config *conf;
6899
873a185b
TT
6900 *buffer += "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n";
6901 *buffer += "<btrace-conf version=\"1.0\">\n";
f4abbc16
MM
6902
6903 conf = linux_btrace_conf (tinfo);
6904 if (conf != NULL)
6905 {
6906 switch (conf->format)
6907 {
6908 case BTRACE_FORMAT_NONE:
6909 break;
6910
6911 case BTRACE_FORMAT_BTS:
873a185b
TT
6912 string_xml_appendf (*buffer, "<bts");
6913 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->bts.size);
6914 string_xml_appendf (*buffer, " />\n");
f4abbc16 6915 break;
b20a6524
MM
6916
6917 case BTRACE_FORMAT_PT:
873a185b
TT
6918 string_xml_appendf (*buffer, "<pt");
6919 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->pt.size);
6920 string_xml_appendf (*buffer, "/>\n");
b20a6524 6921 break;
f4abbc16
MM
6922 }
6923 }
6924
873a185b 6925 *buffer += "</btrace-conf>\n";
f4abbc16
MM
6926 return 0;
6927}
9accd112
MM
6928#endif /* HAVE_LINUX_BTRACE */
6929
7b669087
GB
/* See nat/linux-nat.h.  */

ptid_t
current_lwp_ptid (void)
{
  /* In gdbserver, the current LWP is simply the current thread.  */
  return ptid_of (current_thread);
}
6937
7f63b89b
TBA
const char *
linux_process_target::thread_name (ptid_t thread)
{
  /* Delegate to the procfs-based lookup in nat/linux-procfs.  */
  return linux_proc_tid_get_name (thread);
}
6943
6944#if USE_THREAD_DB
bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  /* Thread handles are a libthread_db concept; delegate to it.  */
  return thread_db_thread_handle (ptid, handle, handle_len);
}
6951#endif
6952
7b961964
SM
6953thread_info *
6954linux_process_target::thread_pending_parent (thread_info *thread)
6955{
6956 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
6957
6958 if (parent == nullptr)
6959 return nullptr;
6960
6961 return get_lwp_thread (parent);
6962}
6963
df5ad102 6964thread_info *
faf44a31
PA
6965linux_process_target::thread_pending_child (thread_info *thread,
6966 target_waitkind *kind)
df5ad102 6967{
faf44a31 6968 lwp_info *child = get_thread_lwp (thread)->pending_child (kind);
df5ad102
SM
6969
6970 if (child == nullptr)
6971 return nullptr;
6972
6973 return get_lwp_thread (child);
6974}
6975
276d4552
YQ
6976/* Default implementation of linux_target_ops method "set_pc" for
6977 32-bit pc register which is literally named "pc". */
6978
6979void
6980linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
6981{
6982 uint32_t newpc = pc;
6983
6984 supply_register_by_name (regcache, "pc", &newpc);
6985}
6986
6987/* Default implementation of linux_target_ops method "get_pc" for
6988 32-bit pc register which is literally named "pc". */
6989
6990CORE_ADDR
6991linux_get_pc_32bit (struct regcache *regcache)
6992{
6993 uint32_t pc;
6994
6995 collect_register_by_name (regcache, "pc", &pc);
c058728c 6996 threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
276d4552
YQ
6997 return pc;
6998}
6999
6f69e520
YQ
7000/* Default implementation of linux_target_ops method "set_pc" for
7001 64-bit pc register which is literally named "pc". */
7002
7003void
7004linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7005{
7006 uint64_t newpc = pc;
7007
7008 supply_register_by_name (regcache, "pc", &newpc);
7009}
7010
7011/* Default implementation of linux_target_ops method "get_pc" for
7012 64-bit pc register which is literally named "pc". */
7013
7014CORE_ADDR
7015linux_get_pc_64bit (struct regcache *regcache)
7016{
7017 uint64_t pc;
7018
7019 collect_register_by_name (regcache, "pc", &pc);
c058728c 7020 threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
6f69e520
YQ
7021 return pc;
7022}
7023
0570503d 7024/* See linux-low.h. */
974c89e0 7025
0570503d 7026int
43e5fbd8 7027linux_get_auxv (int pid, int wordsize, CORE_ADDR match, CORE_ADDR *valp)
974c89e0
AH
7028{
7029 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7030 int offset = 0;
7031
7032 gdb_assert (wordsize == 4 || wordsize == 8);
7033
43e5fbd8
TJB
7034 while (the_target->read_auxv (pid, offset, data, 2 * wordsize)
7035 == 2 * wordsize)
974c89e0
AH
7036 {
7037 if (wordsize == 4)
7038 {
0570503d 7039 uint32_t *data_p = (uint32_t *) data;
974c89e0 7040 if (data_p[0] == match)
0570503d
PFC
7041 {
7042 *valp = data_p[1];
7043 return 1;
7044 }
974c89e0
AH
7045 }
7046 else
7047 {
0570503d 7048 uint64_t *data_p = (uint64_t *) data;
974c89e0 7049 if (data_p[0] == match)
0570503d
PFC
7050 {
7051 *valp = data_p[1];
7052 return 1;
7053 }
974c89e0
AH
7054 }
7055
7056 offset += 2 * wordsize;
7057 }
7058
7059 return 0;
7060}
7061
7062/* See linux-low.h. */
7063
7064CORE_ADDR
43e5fbd8 7065linux_get_hwcap (int pid, int wordsize)
974c89e0 7066{
0570503d 7067 CORE_ADDR hwcap = 0;
43e5fbd8 7068 linux_get_auxv (pid, wordsize, AT_HWCAP, &hwcap);
0570503d 7069 return hwcap;
974c89e0
AH
7070}
7071
7072/* See linux-low.h. */
7073
7074CORE_ADDR
43e5fbd8 7075linux_get_hwcap2 (int pid, int wordsize)
974c89e0 7076{
0570503d 7077 CORE_ADDR hwcap2 = 0;
43e5fbd8 7078 linux_get_auxv (pid, wordsize, AT_HWCAP2, &hwcap2);
0570503d 7079 return hwcap2;
974c89e0 7080}
6f69e520 7081
3aee8918
PA
7082#ifdef HAVE_LINUX_REGSETS
7083void
7084initialize_regsets_info (struct regsets_info *info)
7085{
7086 for (info->num_regsets = 0;
7087 info->regsets[info->num_regsets].size >= 0;
7088 info->num_regsets++)
7089 ;
3aee8918
PA
7090}
7091#endif
7092
da6d8c04
DJ
/* One-time initialization of the Linux low-level target.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (the_linux_target);

  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();

  /* Install the SIGCHLD handler.  SA_RESTART keeps interrupted
     syscalls restarting instead of failing with EINTR when a child
     changes state.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  linux_check_ptrace_features ();
}