]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdbserver/linux-low.cc
gdb+gdbserver/Linux: Remove USE_SIGTRAP_SIGINFO fallback
[thirdparty/binutils-gdb.git] / gdbserver / linux-low.cc
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
1d506c26 2 Copyright (C) 1995-2024 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04 18
58caa3dc 19#include "linux-low.h"
125f8a3d 20#include "nat/linux-osdata.h"
268a13a5 21#include "gdbsupport/agent.h"
de0d863e 22#include "tdesc.h"
cdc8e9b2
JB
23#include "gdbsupport/event-loop.h"
24#include "gdbsupport/event-pipe.h"
268a13a5
TT
25#include "gdbsupport/rsp-low.h"
26#include "gdbsupport/signals-state-save-restore.h"
96d7229d
LM
27#include "nat/linux-nat.h"
28#include "nat/linux-waitpid.h"
268a13a5 29#include "gdbsupport/gdb_wait.h"
5826e159 30#include "nat/gdb_ptrace.h"
125f8a3d
GB
31#include "nat/linux-ptrace.h"
32#include "nat/linux-procfs.h"
8cc73a39 33#include "nat/linux-personality.h"
da6d8c04
DJ
34#include <signal.h>
35#include <sys/ioctl.h>
36#include <fcntl.h>
0a30fbc4 37#include <unistd.h>
fd500816 38#include <sys/syscall.h>
f9387fc3 39#include <sched.h>
07e059b5
VP
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
53ce3c39 43#include <sys/stat.h>
efcbbd14 44#include <sys/vfs.h>
1570b33e 45#include <sys/uio.h>
07b3255c
TT
46#include <langinfo.h>
47#include <iconv.h>
268a13a5 48#include "gdbsupport/filestuff.h"
07b3255c 49#include "gdbsupport/gdb-safe-ctype.h"
c144c7a0 50#include "tracepoint.h"
276d4552 51#include <inttypes.h>
268a13a5 52#include "gdbsupport/common-inferior.h"
2090129c 53#include "nat/fork-inferior.h"
268a13a5 54#include "gdbsupport/environ.h"
21987b9c 55#include "gdbsupport/gdb-sigmask.h"
268a13a5 56#include "gdbsupport/scoped_restore.h"
957f3f49
DE
57#ifndef ELFMAG0
58/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
59 then ELFMAG0 will have been defined. If it didn't get included by
60 gdb_proc_service.h then including it will likely introduce a duplicate
61 definition of elf_fpregset_t. */
62#include <elf.h>
63#endif
14d2069a 64#include "nat/linux-namespaces.h"
efcbbd14 65
fd462a61
DJ
66#ifndef O_LARGEFILE
67#define O_LARGEFILE 0
68#endif
1a981360 69
69f4c9cc
AH
70#ifndef AT_HWCAP2
71#define AT_HWCAP2 26
72#endif
73
db0dfaa0
LM
74/* Some targets did not define these ptrace constants from the start,
75 so gdbserver defines them locally here. In the future, these may
76 be removed after they are added to asm/ptrace.h. */
77#if !(defined(PT_TEXT_ADDR) \
78 || defined(PT_DATA_ADDR) \
79 || defined(PT_TEXT_END_ADDR))
80#if defined(__mcoldfire__)
81/* These are still undefined in 3.10 kernels. */
82#define PT_TEXT_ADDR 49*4
83#define PT_DATA_ADDR 50*4
84#define PT_TEXT_END_ADDR 51*4
db0dfaa0
LM
85/* These are still undefined in 3.10 kernels. */
86#elif defined(__TMS320C6X__)
87#define PT_TEXT_ADDR (0x10000*4)
88#define PT_DATA_ADDR (0x10004*4)
89#define PT_TEXT_END_ADDR (0x10008*4)
90#endif
91#endif
92
5203ae1e
TBA
93#if (defined(__UCLIBC__) \
94 && defined(HAS_NOMMU) \
95 && defined(PT_TEXT_ADDR) \
96 && defined(PT_DATA_ADDR) \
97 && defined(PT_TEXT_END_ADDR))
98#define SUPPORTS_READ_OFFSETS
99#endif
100
9accd112 101#ifdef HAVE_LINUX_BTRACE
125f8a3d 102# include "nat/linux-btrace.h"
268a13a5 103# include "gdbsupport/btrace-common.h"
9accd112
MM
104#endif
105
8365dcf5
TJB
106#ifndef HAVE_ELF32_AUXV_T
107/* Copied from glibc's elf.h. */
108typedef struct
109{
110 uint32_t a_type; /* Entry type */
111 union
112 {
113 uint32_t a_val; /* Integer value */
114 /* We use to have pointer elements added here. We cannot do that,
115 though, since it does not work when using 32-bit definitions
116 on 64-bit platforms and vice versa. */
117 } a_un;
118} Elf32_auxv_t;
119#endif
120
121#ifndef HAVE_ELF64_AUXV_T
122/* Copied from glibc's elf.h. */
123typedef struct
124{
125 uint64_t a_type; /* Entry type */
126 union
127 {
128 uint64_t a_val; /* Integer value */
129 /* We use to have pointer elements added here. We cannot do that,
130 though, since it does not work when using 32-bit definitions
131 on 64-bit platforms and vice versa. */
132 } a_un;
133} Elf64_auxv_t;
134#endif
135
ded48a5e 136/* Does the current host support PTRACE_GETREGSET? */
56f703d3 137int have_ptrace_getregset = -1;
ded48a5e 138
8a841a35
PA
139/* Return TRUE if THREAD is the leader thread of the process. */
140
141static bool
142is_leader (thread_info *thread)
143{
144 ptid_t ptid = ptid_of (thread);
145 return ptid.pid () == ptid.lwp ();
146}
147
48989498
PA
148/* Return true if we should report thread exit events to GDB, for
149 THR. */
150
151static bool
152report_exit_events_for (thread_info *thr)
153{
154 client_state &cs = get_client_state ();
155
156 return (cs.report_thread_events
157 || (thr->thread_options & GDB_THREAD_OPTION_EXIT) != 0);
158}
159
cff068da
GB
160/* LWP accessors. */
161
162/* See nat/linux-nat.h. */
163
164ptid_t
165ptid_of_lwp (struct lwp_info *lwp)
166{
167 return ptid_of (get_lwp_thread (lwp));
168}
169
170/* See nat/linux-nat.h. */
171
4b134ca1
GB
172void
173lwp_set_arch_private_info (struct lwp_info *lwp,
174 struct arch_lwp_info *info)
175{
176 lwp->arch_private = info;
177}
178
179/* See nat/linux-nat.h. */
180
181struct arch_lwp_info *
182lwp_arch_private_info (struct lwp_info *lwp)
183{
184 return lwp->arch_private;
185}
186
187/* See nat/linux-nat.h. */
188
cff068da
GB
189int
190lwp_is_stopped (struct lwp_info *lwp)
191{
192 return lwp->stopped;
193}
194
195/* See nat/linux-nat.h. */
196
197enum target_stop_reason
198lwp_stop_reason (struct lwp_info *lwp)
199{
200 return lwp->stop_reason;
201}
202
0e00e962
AA
203/* See nat/linux-nat.h. */
204
205int
206lwp_is_stepping (struct lwp_info *lwp)
207{
208 return lwp->stepping;
209}
210
05044653
PA
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The stop status, exactly as reported by waitpid.  */
  int status;

  /* Singly-linked chain to the next pending entry.  */
  struct simple_pid_list *next;
};

/* Head of the list of stopped-but-unclaimed PIDs.  */
static struct simple_pid_list *stopped_pids;
05044653
PA
227
228/* Trivial list manipulation functions to keep track of a list of new
229 stopped processes. */
230
231static void
232add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
233{
8d749320 234 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
235
236 new_pid->pid = pid;
237 new_pid->status = status;
238 new_pid->next = *listp;
239 *listp = new_pid;
240}
241
242static int
243pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
244{
245 struct simple_pid_list **p;
246
247 for (p = listp; *p != NULL; p = &(*p)->next)
248 if ((*p)->pid == pid)
249 {
250 struct simple_pid_list *next = (*p)->next;
251
252 *statusp = (*p)->status;
253 xfree (*p);
254 *p = next;
255 return 1;
256 }
257 return 0;
258}
24a09b5f 259
bde24c0a
PA
260enum stopping_threads_kind
261 {
262 /* Not stopping threads presently. */
263 NOT_STOPPING_THREADS,
264
265 /* Stopping threads. */
266 STOPPING_THREADS,
267
268 /* Stopping and suspending threads. */
269 STOPPING_AND_SUSPENDING_THREADS
270 };
271
272/* This is set while stop_all_lwps is in effect. */
6bd434d6 273static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
274
275/* FIXME make into a target method? */
24a09b5f 276int using_threads = 1;
24a09b5f 277
fa593d66
PA
278/* True if we're presently stabilizing threads (moving them out of
279 jump pads). */
280static int stabilizing_threads;
281
f50bf8e5 282static void unsuspend_all_lwps (struct lwp_info *except);
e8a625d1
PA
283static void mark_lwp_dead (struct lwp_info *lwp, int wstat,
284 bool thread_event);
00db26fa 285static int lwp_is_marked_dead (struct lwp_info *lwp);
d50171e4 286static int kill_lwp (unsigned long lwpid, int signo);
863d01bd 287static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
ece66d65 288static int linux_low_ptrace_options (int attached);
ced2dffb 289static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
d50171e4 290
582511be
PA
291/* When the event-loop is doing a step-over, this points at the thread
292 being stepped. */
6bd434d6 293static ptid_t step_over_bkpt;
582511be 294
bf9ae9d8
TBA
295bool
296linux_process_target::low_supports_breakpoints ()
297{
298 return false;
299}
d50171e4 300
bf9ae9d8
TBA
301CORE_ADDR
302linux_process_target::low_get_pc (regcache *regcache)
303{
304 return 0;
305}
306
307void
308linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
d50171e4 309{
bf9ae9d8 310 gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
d50171e4 311}
0d62e5e8 312
7582c77c
TBA
313std::vector<CORE_ADDR>
314linux_process_target::low_get_next_pcs (regcache *regcache)
315{
316 gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
317 "implemented");
318}
319
d4807ea2
TBA
320int
321linux_process_target::low_decr_pc_after_break ()
322{
323 return 0;
324}
325
c2d6af84
PA
326/* True if LWP is stopped in its stepping range. */
327
328static int
329lwp_in_step_range (struct lwp_info *lwp)
330{
331 CORE_ADDR pc = lwp->stop_pc;
332
333 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
334}
335
cdc8e9b2
JB
336/* The event pipe registered as a waitable file in the event loop. */
337static event_pipe linux_event_pipe;
bd99dc85
PA
338
339/* True if we're currently in async mode. */
cdc8e9b2 340#define target_is_async_p() (linux_event_pipe.is_open ())
bd99dc85 341
02fc4de7 342static void send_sigstop (struct lwp_info *lwp);
bd99dc85 343
d0722149
DE
/* Examine HEADER.  If it does not carry the ELF magic, set *MACHINE
   to EM_NONE and return -1.  Otherwise set *MACHINE to e_machine and
   return 1 when the object is 64-bit (ELFCLASS64), 0 otherwise.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] != ELFMAG0
      || header->e_ident[EI_MAG1] != ELFMAG1
      || header->e_ident[EI_MAG2] != ELFMAG2
      || header->e_ident[EI_MAG3] != ELFMAG3)
    {
      /* Not an ELF object at all.  */
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return header->e_ident[EI_CLASS] == ELFCLASS64;
}
361
362/* Return non-zero if FILE is a 64-bit ELF file,
363 zero if the file is not a 64-bit ELF file,
364 and -1 if the file is not accessible or doesn't exist. */
365
be07f1a2 366static int
214d508e 367elf_64_file_p (const char *file, unsigned int *machine)
d0722149 368{
957f3f49 369 Elf64_Ehdr header;
d0722149
DE
370 int fd;
371
372 fd = open (file, O_RDONLY);
373 if (fd < 0)
374 return -1;
375
376 if (read (fd, &header, sizeof (header)) != sizeof (header))
377 {
378 close (fd);
379 return 0;
380 }
381 close (fd);
382
214d508e 383 return elf_64_header_p (&header, machine);
d0722149
DE
384}
385
be07f1a2
PA
386/* Accepts an integer PID; Returns true if the executable PID is
387 running is a 64-bit ELF file.. */
388
389int
214d508e 390linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 391{
d8d2a3ee 392 char file[PATH_MAX];
be07f1a2
PA
393
394 sprintf (file, "/proc/%d/exe", pid);
214d508e 395 return elf_64_file_p (file, machine);
be07f1a2
PA
396}
397
fd000fb3
TBA
398void
399linux_process_target::delete_lwp (lwp_info *lwp)
bd99dc85 400{
fa96cb38
PA
401 struct thread_info *thr = get_lwp_thread (lwp);
402
c058728c 403 threads_debug_printf ("deleting %ld", lwpid_of (thr));
fa96cb38
PA
404
405 remove_thread (thr);
466eecee 406
fd000fb3 407 low_delete_thread (lwp->arch_private);
466eecee 408
013e3554 409 delete lwp;
bd99dc85
PA
410}
411
fd000fb3
TBA
412void
413linux_process_target::low_delete_thread (arch_lwp_info *info)
414{
415 /* Default implementation should be overridden if architecture-specific
416 info is being used. */
417 gdb_assert (info == nullptr);
418}
95954743 419
421490af
PA
420/* Open the /proc/PID/mem file for PROC. */
421
422static void
423open_proc_mem_file (process_info *proc)
424{
425 gdb_assert (proc->priv->mem_fd == -1);
426
427 char filename[64];
428 xsnprintf (filename, sizeof filename, "/proc/%d/mem", proc->pid);
429
430 proc->priv->mem_fd
431 = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
432}
433
fd000fb3 434process_info *
421490af 435linux_process_target::add_linux_process_no_mem_file (int pid, int attached)
95954743
PA
436{
437 struct process_info *proc;
438
95954743 439 proc = add_process (pid, attached);
8d749320 440 proc->priv = XCNEW (struct process_info_private);
95954743 441
fd000fb3 442 proc->priv->arch_private = low_new_process ();
421490af
PA
443 proc->priv->mem_fd = -1;
444
445 return proc;
446}
447
aa5ca48f 448
421490af
PA
449process_info *
450linux_process_target::add_linux_process (int pid, int attached)
451{
452 process_info *proc = add_linux_process_no_mem_file (pid, attached);
453 open_proc_mem_file (proc);
95954743
PA
454 return proc;
455}
456
f551c8ef
SM
457void
458linux_process_target::remove_linux_process (process_info *proc)
459{
460 if (proc->priv->mem_fd >= 0)
461 close (proc->priv->mem_fd);
462
463 this->low_delete_process (proc->priv->arch_private);
464
465 xfree (proc->priv);
466 proc->priv = nullptr;
467
468 remove_process (proc);
469}
470
fd000fb3
TBA
471arch_process_info *
472linux_process_target::low_new_process ()
473{
474 return nullptr;
475}
476
477void
478linux_process_target::low_delete_process (arch_process_info *info)
479{
480 /* Default implementation must be overridden if architecture-specific
481 info exists. */
482 gdb_assert (info == nullptr);
483}
484
485void
486linux_process_target::low_new_fork (process_info *parent, process_info *child)
487{
488 /* Nop. */
489}
490
797bcff5
TBA
491void
492linux_process_target::arch_setup_thread (thread_info *thread)
94585166 493{
24583e45
TBA
494 scoped_restore_current_thread restore_thread;
495 switch_to_thread (thread);
94585166 496
797bcff5 497 low_arch_setup ();
94585166
DB
498}
499
d16f3f6c
TBA
500int
501linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
502 int wstat)
24a09b5f 503{
c12a5089 504 client_state &cs = get_client_state ();
94585166 505 struct lwp_info *event_lwp = *orig_event_lwp;
89a5711c 506 int event = linux_ptrace_get_extended_event (wstat);
de0d863e 507 struct thread_info *event_thr = get_lwp_thread (event_lwp);
24a09b5f 508
183be222 509 gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
65706a29 510
82075af2
JS
511 /* All extended events we currently use are mid-syscall. Only
512 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
513 you have to be using PTRACE_SEIZE to get that. */
514 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
515
c269dbdb
DB
516 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
517 || (event == PTRACE_EVENT_CLONE))
24a09b5f
DJ
518 {
519 unsigned long new_pid;
05044653 520 int ret, status;
24a09b5f 521
de0d863e 522 /* Get the pid of the new lwp. */
d86d4aaf 523 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
56f7af9c 524 &new_pid);
24a09b5f
DJ
525
526 /* If we haven't already seen the new PID stop, wait for it now. */
05044653 527 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
24a09b5f
DJ
528 {
529 /* The new child has a pending SIGSTOP. We can't affect it until it
530 hits the SIGSTOP, but we're already attached. */
531
97438e3f 532 ret = my_waitpid (new_pid, &status, __WALL);
24a09b5f
DJ
533
534 if (ret == -1)
535 perror_with_name ("waiting for new child");
536 else if (ret != new_pid)
537 warning ("wait returned unexpected PID %d", ret);
da5898ce 538 else if (!WIFSTOPPED (status))
24a09b5f
DJ
539 warning ("wait returned unexpected status 0x%x", status);
540 }
541
393a6b59 542 if (debug_threads)
de0d863e 543 {
393a6b59
PA
544 debug_printf ("HEW: Got %s event from LWP %ld, new child is %ld\n",
545 (event == PTRACE_EVENT_FORK ? "fork"
546 : event == PTRACE_EVENT_VFORK ? "vfork"
547 : event == PTRACE_EVENT_CLONE ? "clone"
548 : "???"),
549 ptid_of (event_thr).lwp (),
550 new_pid);
551 }
552
553 ptid_t child_ptid = (event != PTRACE_EVENT_CLONE
554 ? ptid_t (new_pid, new_pid)
555 : ptid_t (ptid_of (event_thr).pid (), new_pid));
de0d863e 556
38065394
PA
557 process_info *child_proc = nullptr;
558
559 if (event != PTRACE_EVENT_CLONE)
560 {
561 /* Add the new process to the tables before we add the LWP.
562 We need to do this even if the new process will be
563 detached. See breakpoint cloning code further below. */
564 child_proc = add_linux_process (new_pid, 0);
565 }
566
393a6b59
PA
567 lwp_info *child_lwp = add_lwp (child_ptid);
568 gdb_assert (child_lwp != NULL);
569 child_lwp->stopped = 1;
570 if (event != PTRACE_EVENT_CLONE)
571 child_lwp->must_set_ptrace_flags = 1;
572 child_lwp->status_pending_p = 0;
de0d863e 573
393a6b59 574 thread_info *child_thr = get_lwp_thread (child_lwp);
de0d863e 575
393a6b59
PA
576 /* If we're suspending all threads, leave this one suspended
577 too. If the fork/clone parent is stepping over a breakpoint,
578 all other threads have been suspended already. Leave the
579 child suspended too. */
580 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
581 || event_lwp->bp_reinsert != 0)
582 {
583 threads_debug_printf ("leaving child suspended");
584 child_lwp->suspended = 1;
585 }
586
587 if (event_lwp->bp_reinsert != 0
588 && supports_software_single_step ()
589 && event == PTRACE_EVENT_VFORK)
590 {
591 /* If we leave single-step breakpoints there, child will
592 hit it, so uninsert single-step breakpoints from parent
593 (and child). Once vfork child is done, reinsert
594 them back to parent. */
595 uninsert_single_step_breakpoints (event_thr);
596 }
597
598 if (event != PTRACE_EVENT_CLONE)
599 {
38065394
PA
600 /* Clone the breakpoint lists of the parent. We need to do
601 this even if the new process will be detached, since we
602 will need the process object and the breakpoints to
603 remove any breakpoints from memory when we detach, and
604 the client side will access registers. */
de0d863e 605 gdb_assert (child_proc != NULL);
863d01bd 606
393a6b59 607 process_info *parent_proc = get_thread_process (event_thr);
de0d863e 608 child_proc->attached = parent_proc->attached;
2e7b624b 609
63c40ec7 610 clone_all_breakpoints (child_thr, event_thr);
de0d863e 611
51a948fd
AB
612 target_desc_up tdesc = allocate_target_description ();
613 copy_target_description (tdesc.get (), parent_proc->tdesc);
614 child_proc->tdesc = tdesc.release ();
de0d863e 615
3a8a0396 616 /* Clone arch-specific process data. */
fd000fb3 617 low_new_fork (parent_proc, child_proc);
393a6b59 618 }
3a8a0396 619
393a6b59
PA
620 /* Save fork/clone info in the parent thread. */
621 if (event == PTRACE_EVENT_FORK)
622 event_lwp->waitstatus.set_forked (child_ptid);
623 else if (event == PTRACE_EVENT_VFORK)
624 event_lwp->waitstatus.set_vforked (child_ptid);
625 else if (event == PTRACE_EVENT_CLONE
626 && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
627 event_lwp->waitstatus.set_thread_cloned (child_ptid);
628
629 if (event != PTRACE_EVENT_CLONE
630 || (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
631 {
de0d863e 632 /* The status_pending field contains bits denoting the
393a6b59
PA
633 extended event, so when the pending event is handled, the
634 handler will look at lwp->waitstatus. */
de0d863e
DB
635 event_lwp->status_pending_p = 1;
636 event_lwp->status_pending = wstat;
637
393a6b59
PA
638 /* Link the threads until the parent's event is passed on to
639 GDB. */
640 event_lwp->relative = child_lwp;
641 child_lwp->relative = event_lwp;
de0d863e
DB
642 }
643
393a6b59
PA
644 /* If the parent thread is doing step-over with single-step
645 breakpoints, the list of single-step breakpoints are cloned
646 from the parent's. Remove them from the child process.
647 In case of vfork, we'll reinsert them back once vforked
648 child is done. */
649 if (event_lwp->bp_reinsert != 0
650 && supports_software_single_step ())
651 {
652 /* The child process is forked and stopped, so it is safe
653 to access its memory without stopping all other threads
654 from other processes. */
655 delete_single_step_breakpoints (child_thr);
e27d73f6 656
393a6b59
PA
657 gdb_assert (has_single_step_breakpoints (event_thr));
658 gdb_assert (!has_single_step_breakpoints (child_thr));
659 }
bde24c0a 660
da5898ce
DJ
661 /* Normally we will get the pending SIGSTOP. But in some cases
662 we might get another signal delivered to the group first.
f21cc1a2 663 If we do get another signal, be sure not to lose it. */
20ba1ce6 664 if (WSTOPSIG (status) != SIGSTOP)
da5898ce 665 {
393a6b59
PA
666 child_lwp->stop_expected = 1;
667 child_lwp->status_pending_p = 1;
668 child_lwp->status_pending = status;
da5898ce 669 }
393a6b59 670 else if (event == PTRACE_EVENT_CLONE && cs.report_thread_events)
65706a29 671 {
393a6b59
PA
672 child_lwp->waitstatus.set_thread_created ();
673 child_lwp->status_pending_p = 1;
674 child_lwp->status_pending = status;
65706a29 675 }
de0d863e 676
393a6b59
PA
677 if (event == PTRACE_EVENT_CLONE)
678 {
a0aad537 679#ifdef USE_THREAD_DB
393a6b59 680 thread_db_notice_clone (event_thr, child_ptid);
a0aad537 681#endif
393a6b59 682 }
86299109 683
393a6b59
PA
684 if (event == PTRACE_EVENT_CLONE
685 && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) == 0)
686 {
687 threads_debug_printf
688 ("not reporting clone event from LWP %ld, new child is %ld\n",
689 ptid_of (event_thr).lwp (),
690 new_pid);
691 return 1;
692 }
693
694 /* Leave the child stopped until GDB processes the parent
695 event. */
696 child_thr->last_resume_kind = resume_stop;
697 child_thr->last_status.set_stopped (GDB_SIGNAL_0);
698
699 /* Report the event. */
700 threads_debug_printf
701 ("reporting %s event from LWP %ld, new child is %ld\n",
702 (event == PTRACE_EVENT_FORK ? "fork"
703 : event == PTRACE_EVENT_VFORK ? "vfork"
704 : event == PTRACE_EVENT_CLONE ? "clone"
705 : "???"),
706 ptid_of (event_thr).lwp (),
707 new_pid);
708 return 0;
24a09b5f 709 }
c269dbdb
DB
710 else if (event == PTRACE_EVENT_VFORK_DONE)
711 {
183be222 712 event_lwp->waitstatus.set_vfork_done ();
c269dbdb 713
7582c77c 714 if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
2e7b624b 715 {
3b9a79ef 716 reinsert_single_step_breakpoints (event_thr);
2e7b624b 717
3b9a79ef 718 gdb_assert (has_single_step_breakpoints (event_thr));
2e7b624b
YQ
719 }
720
c269dbdb
DB
721 /* Report the event. */
722 return 0;
723 }
c12a5089 724 else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
94585166
DB
725 {
726 struct process_info *proc;
f27866ba 727 std::vector<int> syscalls_to_catch;
94585166
DB
728 ptid_t event_ptid;
729 pid_t event_pid;
730
c058728c
SM
731 threads_debug_printf ("Got exec event from LWP %ld",
732 lwpid_of (event_thr));
94585166
DB
733
734 /* Get the event ptid. */
735 event_ptid = ptid_of (event_thr);
e99b03dc 736 event_pid = event_ptid.pid ();
94585166 737
82075af2 738 /* Save the syscall list from the execing process. */
94585166 739 proc = get_thread_process (event_thr);
f27866ba 740 syscalls_to_catch = std::move (proc->syscalls_to_catch);
82075af2
JS
741
742 /* Delete the execing process and all its threads. */
d16f3f6c 743 mourn (proc);
24583e45 744 switch_to_thread (nullptr);
94585166
DB
745
746 /* Create a new process/lwp/thread. */
fd000fb3 747 proc = add_linux_process (event_pid, 0);
94585166
DB
748 event_lwp = add_lwp (event_ptid);
749 event_thr = get_lwp_thread (event_lwp);
750 gdb_assert (current_thread == event_thr);
797bcff5 751 arch_setup_thread (event_thr);
94585166
DB
752
753 /* Set the event status. */
183be222
SM
754 event_lwp->waitstatus.set_execd
755 (make_unique_xstrdup
756 (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));
94585166
DB
757
758 /* Mark the exec status as pending. */
759 event_lwp->stopped = 1;
760 event_lwp->status_pending_p = 1;
761 event_lwp->status_pending = wstat;
762 event_thr->last_resume_kind = resume_continue;
183be222 763 event_thr->last_status.set_ignore ();
94585166 764
82075af2
JS
765 /* Update syscall state in the new lwp, effectively mid-syscall too. */
766 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
767
768 /* Restore the list to catch. Don't rely on the client, which is free
769 to avoid sending a new list when the architecture doesn't change.
770 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
f27866ba 771 proc->syscalls_to_catch = std::move (syscalls_to_catch);
82075af2 772
94585166
DB
773 /* Report the event. */
774 *orig_event_lwp = event_lwp;
775 return 0;
776 }
de0d863e 777
f34652de 778 internal_error (_("unknown ptrace event %d"), event);
24a09b5f
DJ
779}
780
df95181f
TBA
781CORE_ADDR
782linux_process_target::get_pc (lwp_info *lwp)
d50171e4 783{
a9deee17
PA
784 process_info *proc = get_thread_process (get_lwp_thread (lwp));
785 gdb_assert (!proc->starting_up);
d50171e4 786
bf9ae9d8 787 if (!low_supports_breakpoints ())
d50171e4
PA
788 return 0;
789
24583e45
TBA
790 scoped_restore_current_thread restore_thread;
791 switch_to_thread (get_lwp_thread (lwp));
d50171e4 792
a9deee17
PA
793 struct regcache *regcache = get_thread_regcache (current_thread, 1);
794 CORE_ADDR pc = low_get_pc (regcache);
d50171e4 795
c058728c 796 threads_debug_printf ("pc is 0x%lx", (long) pc);
d50171e4 797
d50171e4
PA
798 return pc;
799}
800
9eedd27d
TBA
801void
802linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
82075af2 803{
82075af2
JS
804 struct regcache *regcache;
805
24583e45
TBA
806 scoped_restore_current_thread restore_thread;
807 switch_to_thread (get_lwp_thread (lwp));
82075af2
JS
808
809 regcache = get_thread_regcache (current_thread, 1);
9eedd27d 810 low_get_syscall_trapinfo (regcache, sysno);
82075af2 811
c058728c 812 threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
82075af2
JS
813}
814
9eedd27d
TBA
815void
816linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
817{
818 /* By default, report an unknown system call number. */
819 *sysno = UNKNOWN_SYSCALL;
820}
821
df95181f
TBA
822bool
823linux_process_target::save_stop_reason (lwp_info *lwp)
0d62e5e8 824{
582511be
PA
825 CORE_ADDR pc;
826 CORE_ADDR sw_breakpoint_pc;
3e572f71 827 siginfo_t siginfo;
d50171e4 828
bf9ae9d8 829 if (!low_supports_breakpoints ())
df95181f 830 return false;
0d62e5e8 831
a9deee17
PA
832 process_info *proc = get_thread_process (get_lwp_thread (lwp));
833 if (proc->starting_up)
834 {
835 /* Claim we have the stop PC so that the caller doesn't try to
836 fetch it itself. */
837 return true;
838 }
839
582511be 840 pc = get_pc (lwp);
d4807ea2 841 sw_breakpoint_pc = pc - low_decr_pc_after_break ();
d50171e4 842
582511be 843 /* breakpoint_at reads from the current thread. */
24583e45
TBA
844 scoped_restore_current_thread restore_thread;
845 switch_to_thread (get_lwp_thread (lwp));
47c0c975 846
3e572f71
PA
847 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
848 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
849 {
850 if (siginfo.si_signo == SIGTRAP)
851 {
e7ad2f14
PA
852 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
853 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
3e572f71 854 {
e7ad2f14
PA
855 /* The si_code is ambiguous on this arch -- check debug
856 registers. */
857 if (!check_stopped_by_watchpoint (lwp))
858 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
859 }
860 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
861 {
862 /* If we determine the LWP stopped for a SW breakpoint,
863 trust it. Particularly don't check watchpoint
864 registers, because at least on s390, we'd find
865 stopped-by-watchpoint as long as there's a watchpoint
866 set. */
3e572f71 867 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
3e572f71 868 }
e7ad2f14 869 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
3e572f71 870 {
e7ad2f14
PA
871 /* This can indicate either a hardware breakpoint or
872 hardware watchpoint. Check debug registers. */
873 if (!check_stopped_by_watchpoint (lwp))
874 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
3e572f71 875 }
2bf6fb9d
PA
876 else if (siginfo.si_code == TRAP_TRACE)
877 {
e7ad2f14
PA
878 /* We may have single stepped an instruction that
879 triggered a watchpoint. In that case, on some
880 architectures (such as x86), instead of TRAP_HWBKPT,
881 si_code indicates TRAP_TRACE, and we need to check
882 the debug registers separately. */
883 if (!check_stopped_by_watchpoint (lwp))
884 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
2bf6fb9d 885 }
3e572f71
PA
886 }
887 }
e7ad2f14
PA
888
889 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
582511be 890 {
c058728c
SM
891 threads_debug_printf
892 ("%s stopped by software breakpoint",
893 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
582511be
PA
894
895 /* Back up the PC if necessary. */
896 if (pc != sw_breakpoint_pc)
e7ad2f14 897 {
582511be
PA
898 struct regcache *regcache
899 = get_thread_regcache (current_thread, 1);
bf9ae9d8 900 low_set_pc (regcache, sw_breakpoint_pc);
582511be
PA
901 }
902
e7ad2f14
PA
903 /* Update this so we record the correct stop PC below. */
904 pc = sw_breakpoint_pc;
582511be 905 }
e7ad2f14 906 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
c058728c
SM
907 threads_debug_printf
908 ("%s stopped by hardware breakpoint",
909 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
e7ad2f14 910 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
c058728c
SM
911 threads_debug_printf
912 ("%s stopped by hardware watchpoint",
913 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
e7ad2f14 914 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
c058728c
SM
915 threads_debug_printf
916 ("%s stopped by trace",
917 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
e7ad2f14
PA
918
919 lwp->stop_pc = pc;
df95181f 920 return true;
0d62e5e8 921}
ce3a066d 922
fd000fb3
TBA
923lwp_info *
924linux_process_target::add_lwp (ptid_t ptid)
611cb4a5 925{
c360a473 926 lwp_info *lwp = new lwp_info;
0d62e5e8 927
754e3168
AH
928 lwp->thread = add_thread (ptid, lwp);
929
fd000fb3 930 low_new_thread (lwp);
aa5ca48f 931
54a0b537 932 return lwp;
0d62e5e8 933}
611cb4a5 934
fd000fb3
TBA
935void
936linux_process_target::low_new_thread (lwp_info *info)
937{
938 /* Nop. */
939}
940
2090129c
SDJ
941/* Callback to be used when calling fork_inferior, responsible for
942 actually initiating the tracing of the inferior. */
943
944static void
945linux_ptrace_fun ()
946{
947 if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
948 (PTRACE_TYPE_ARG4) 0) < 0)
50fa3001 949 trace_start_error_with_name ("ptrace");
2090129c
SDJ
950
951 if (setpgid (0, 0) < 0)
952 trace_start_error_with_name ("setpgid");
953
954 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
955 stdout to stderr so that inferior i/o doesn't corrupt the connection.
956 Also, redirect stdin to /dev/null. */
957 if (remote_connection_is_stdio ())
958 {
959 if (close (0) < 0)
960 trace_start_error_with_name ("close");
961 if (open ("/dev/null", O_RDONLY) < 0)
962 trace_start_error_with_name ("open");
963 if (dup2 (2, 1) < 0)
964 trace_start_error_with_name ("dup2");
965 if (write (2, "stdin/stdout redirected\n",
966 sizeof ("stdin/stdout redirected\n") - 1) < 0)
967 {
968 /* Errors ignored. */;
969 }
970 }
971}
972
da6d8c04 973/* Start an inferior process and returns its pid.
2090129c
SDJ
974 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
975 are its arguments. */
da6d8c04 976
15295543
TBA
977int
978linux_process_target::create_inferior (const char *program,
979 const std::vector<char *> &program_args)
da6d8c04 980{
c12a5089 981 client_state &cs = get_client_state ();
a6dbe5df 982 struct lwp_info *new_lwp;
da6d8c04 983 int pid;
95954743 984 ptid_t ptid;
03583c20 985
41272101
TT
986 {
987 maybe_disable_address_space_randomization restore_personality
c12a5089 988 (cs.disable_randomization);
bea571eb 989 std::string str_program_args = construct_inferior_arguments (program_args);
41272101
TT
990
991 pid = fork_inferior (program,
992 str_program_args.c_str (),
993 get_environ ()->envp (), linux_ptrace_fun,
994 NULL, NULL, NULL, NULL);
995 }
03583c20 996
421490af
PA
997 /* When spawning a new process, we can't open the mem file yet. We
998 still have to nurse the process through the shell, and that execs
999 a couple times. The address space a /proc/PID/mem file is
1000 accessing is destroyed on exec. */
1001 process_info *proc = add_linux_process_no_mem_file (pid, 0);
95954743 1002
184ea2f7 1003 ptid = ptid_t (pid, pid);
95954743 1004 new_lwp = add_lwp (ptid);
a6dbe5df 1005 new_lwp->must_set_ptrace_flags = 1;
611cb4a5 1006
2090129c
SDJ
1007 post_fork_inferior (pid, program);
1008
421490af
PA
1009 /* PROC is now past the shell running the program we want, so we can
1010 open the /proc/PID/mem file. */
1011 open_proc_mem_file (proc);
1012
a9fa9f7d 1013 return pid;
da6d8c04
DJ
1014}
1015
ece66d65
JS
1016/* Implement the post_create_inferior target_ops method. */
1017
6dee9afb
TBA
1018void
1019linux_process_target::post_create_inferior ()
ece66d65
JS
1020{
1021 struct lwp_info *lwp = get_thread_lwp (current_thread);
1022
797bcff5 1023 low_arch_setup ();
ece66d65
JS
1024
1025 if (lwp->must_set_ptrace_flags)
1026 {
1027 struct process_info *proc = current_process ();
1028 int options = linux_low_ptrace_options (proc->attached);
1029
1030 linux_enable_event_reporting (lwpid_of (current_thread), options);
1031 lwp->must_set_ptrace_flags = 0;
1032 }
1033}
1034
7ae1a6a6 1035int
fd000fb3 1036linux_process_target::attach_lwp (ptid_t ptid)
da6d8c04 1037{
54a0b537 1038 struct lwp_info *new_lwp;
e38504b3 1039 int lwpid = ptid.lwp ();
611cb4a5 1040
b8e1b30e 1041 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
56f7af9c 1042 != 0)
7ae1a6a6 1043 return errno;
24a09b5f 1044
b3312d80 1045 new_lwp = add_lwp (ptid);
0d62e5e8 1046
a6dbe5df
PA
1047 /* We need to wait for SIGSTOP before being able to make the next
1048 ptrace call on this LWP. */
1049 new_lwp->must_set_ptrace_flags = 1;
1050
644cebc9 1051 if (linux_proc_pid_is_stopped (lwpid))
c14d7ab2 1052 {
c058728c 1053 threads_debug_printf ("Attached to a stopped process");
c14d7ab2
PA
1054
1055 /* The process is definitely stopped. It is in a job control
1056 stop, unless the kernel predates the TASK_STOPPED /
1057 TASK_TRACED distinction, in which case it might be in a
1058 ptrace stop. Make sure it is in a ptrace stop; from there we
1059 can kill it, signal it, et cetera.
1060
1061 First make sure there is a pending SIGSTOP. Since we are
1062 already attached, the process can not transition from stopped
1063 to running without a PTRACE_CONT; so we know this signal will
1064 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1065 probably already in the queue (unless this kernel is old
1066 enough to use TASK_STOPPED for ptrace stops); but since
1067 SIGSTOP is not an RT signal, it can only be queued once. */
1068 kill_lwp (lwpid, SIGSTOP);
1069
1070 /* Finally, resume the stopped process. This will deliver the
1071 SIGSTOP (or a higher priority signal, just like normal
1072 PTRACE_ATTACH), which we'll catch later on. */
b8e1b30e 1073 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
c14d7ab2
PA
1074 }
1075
0d62e5e8 1076 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
0e21c1ec
DE
1077 brings it to a halt.
1078
1079 There are several cases to consider here:
1080
1081 1) gdbserver has already attached to the process and is being notified
1b3f6016 1082 of a new thread that is being created.
d50171e4
PA
1083 In this case we should ignore that SIGSTOP and resume the
1084 process. This is handled below by setting stop_expected = 1,
8336d594 1085 and the fact that add_thread sets last_resume_kind ==
d50171e4 1086 resume_continue.
0e21c1ec
DE
1087
1088 2) This is the first thread (the process thread), and we're attaching
1b3f6016
PA
1089 to it via attach_inferior.
1090 In this case we want the process thread to stop.
d50171e4
PA
1091 This is handled by having linux_attach set last_resume_kind ==
1092 resume_stop after we return.
e3deef73
LM
1093
1094 If the pid we are attaching to is also the tgid, we attach to and
1095 stop all the existing threads. Otherwise, we attach to pid and
1096 ignore any other threads in the same group as this pid.
0e21c1ec
DE
1097
1098 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1b3f6016
PA
1099 existing threads.
1100 In this case we want the thread to stop.
1101 FIXME: This case is currently not properly handled.
1102 We should wait for the SIGSTOP but don't. Things work apparently
1103 because enough time passes between when we ptrace (ATTACH) and when
1104 gdb makes the next ptrace call on the thread.
0d62e5e8
DJ
1105
1106 On the other hand, if we are currently trying to stop all threads, we
1107 should treat the new thread as if we had sent it a SIGSTOP. This works
54a0b537 1108 because we are guaranteed that the add_lwp call above added us to the
0e21c1ec
DE
1109 end of the list, and so the new thread has not yet reached
1110 wait_for_sigstop (but will). */
d50171e4 1111 new_lwp->stop_expected = 1;
0d62e5e8 1112
7ae1a6a6 1113 return 0;
95954743
PA
1114}
1115
8784d563
PA
1116/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1117 already attached. Returns true if a new LWP is found, false
1118 otherwise. */
1119
1120static int
1121attach_proc_task_lwp_callback (ptid_t ptid)
1122{
1123 /* Is this a new thread? */
1124 if (find_thread_ptid (ptid) == NULL)
1125 {
e38504b3 1126 int lwpid = ptid.lwp ();
8784d563
PA
1127 int err;
1128
c058728c 1129 threads_debug_printf ("Found new lwp %d", lwpid);
8784d563 1130
fd000fb3 1131 err = the_linux_target->attach_lwp (ptid);
8784d563
PA
1132
1133 /* Be quiet if we simply raced with the thread exiting. EPERM
1134 is returned if the thread's task still exists, and is marked
1135 as exited or zombie, as well as other conditions, so in that
1136 case, confirm the status in /proc/PID/status. */
1137 if (err == ESRCH
1138 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
c058728c
SM
1139 threads_debug_printf
1140 ("Cannot attach to lwp %d: thread is gone (%d: %s)",
1141 lwpid, err, safe_strerror (err));
8784d563
PA
1142 else if (err != 0)
1143 {
4d9b86e1 1144 std::string reason
50fa3001 1145 = linux_ptrace_attach_fail_reason_string (ptid, err);
4d9b86e1 1146
c6f7f9c8 1147 error (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
8784d563
PA
1148 }
1149
1150 return 1;
1151 }
1152 return 0;
1153}
1154
500c1d85
PA
1155static void async_file_mark (void);
1156
e3deef73
LM
1157/* Attach to PID. If PID is the tgid, attach to it and all
1158 of its threads. */
1159
ef03dad8
TBA
1160int
1161linux_process_target::attach (unsigned long pid)
0d62e5e8 1162{
500c1d85
PA
1163 struct process_info *proc;
1164 struct thread_info *initial_thread;
184ea2f7 1165 ptid_t ptid = ptid_t (pid, pid);
7ae1a6a6
PA
1166 int err;
1167
421490af
PA
1168 /* Delay opening the /proc/PID/mem file until we've successfully
1169 attached. */
1170 proc = add_linux_process_no_mem_file (pid, 1);
df0da8a2 1171
e3deef73
LM
1172 /* Attach to PID. We will check for other threads
1173 soon. */
fd000fb3 1174 err = attach_lwp (ptid);
7ae1a6a6 1175 if (err != 0)
4d9b86e1 1176 {
f551c8ef 1177 this->remove_linux_process (proc);
4d9b86e1 1178
50fa3001
SDJ
1179 std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
1180 error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
4d9b86e1 1181 }
7ae1a6a6 1182
421490af
PA
1183 open_proc_mem_file (proc);
1184
500c1d85
PA
1185 /* Don't ignore the initial SIGSTOP if we just attached to this
1186 process. It will be collected by wait shortly. */
184ea2f7 1187 initial_thread = find_thread_ptid (ptid_t (pid, pid));
59487af3 1188 gdb_assert (initial_thread != nullptr);
500c1d85 1189 initial_thread->last_resume_kind = resume_stop;
0d62e5e8 1190
8784d563
PA
1191 /* We must attach to every LWP. If /proc is mounted, use that to
1192 find them now. On the one hand, the inferior may be using raw
1193 clone instead of using pthreads. On the other hand, even if it
1194 is using pthreads, GDB may not be connected yet (thread_db needs
1195 to do symbol lookups, through qSymbol). Also, thread_db walks
1196 structures in the inferior's address space to find the list of
1197 threads/LWPs, and those structures may well be corrupted. Note
1198 that once thread_db is loaded, we'll still use it to list threads
1199 and associate pthread info with each LWP. */
c6f7f9c8
TT
1200 try
1201 {
1202 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1203 }
1204 catch (const gdb_exception_error &)
1205 {
1206 /* Make sure we do not deliver the SIGSTOP to the process. */
1207 initial_thread->last_resume_kind = resume_continue;
1208
1209 this->detach (proc);
1210 throw;
1211 }
500c1d85
PA
1212
1213 /* GDB will shortly read the xml target description for this
1214 process, to figure out the process' architecture. But the target
1215 description is only filled in when the first process/thread in
1216 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1217 that now, otherwise, if GDB is fast enough, it could read the
1218 target description _before_ that initial stop. */
1219 if (non_stop)
1220 {
1221 struct lwp_info *lwp;
1222 int wstat, lwpid;
f2907e49 1223 ptid_t pid_ptid = ptid_t (pid);
500c1d85 1224
d16f3f6c 1225 lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
500c1d85
PA
1226 gdb_assert (lwpid > 0);
1227
f2907e49 1228 lwp = find_lwp_pid (ptid_t (lwpid));
59487af3 1229 gdb_assert (lwp != nullptr);
500c1d85
PA
1230
1231 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1232 {
1233 lwp->status_pending_p = 1;
1234 lwp->status_pending = wstat;
1235 }
1236
1237 initial_thread->last_resume_kind = resume_continue;
1238
1239 async_file_mark ();
1240
1241 gdb_assert (proc->tdesc != NULL);
1242 }
1243
95954743
PA
1244 return 0;
1245}
1246
95954743 1247static int
e4eb0dec 1248last_thread_of_process_p (int pid)
95954743 1249{
e4eb0dec 1250 bool seen_one = false;
95954743 1251
da4ae14a 1252 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
95954743 1253 {
e4eb0dec
SM
1254 if (!seen_one)
1255 {
1256 /* This is the first thread of this process we see. */
1257 seen_one = true;
1258 return false;
1259 }
1260 else
1261 {
1262 /* This is the second thread of this process we see. */
1263 return true;
1264 }
1265 });
da6d8c04 1266
e4eb0dec 1267 return thread == NULL;
95954743
PA
1268}
1269
da84f473
PA
1270/* Kill LWP. */
1271
1272static void
1273linux_kill_one_lwp (struct lwp_info *lwp)
1274{
d86d4aaf
DE
1275 struct thread_info *thr = get_lwp_thread (lwp);
1276 int pid = lwpid_of (thr);
da84f473
PA
1277
1278 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1279 there is no signal context, and ptrace(PTRACE_KILL) (or
1280 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1281 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1282 alternative is to kill with SIGKILL. We only need one SIGKILL
1283 per process, not one for each thread. But since we still support
4a6ed09b
PA
1284 support debugging programs using raw clone without CLONE_THREAD,
1285 we send one for each thread. For years, we used PTRACE_KILL
1286 only, so we're being a bit paranoid about some old kernels where
1287 PTRACE_KILL might work better (dubious if there are any such, but
1288 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1289 second, and so we're fine everywhere. */
da84f473
PA
1290
1291 errno = 0;
69ff6be5 1292 kill_lwp (pid, SIGKILL);
da84f473 1293 if (debug_threads)
ce9e3fe7
PA
1294 {
1295 int save_errno = errno;
1296
c058728c
SM
1297 threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
1298 target_pid_to_str (ptid_of (thr)).c_str (),
1299 save_errno ? safe_strerror (save_errno) : "OK");
ce9e3fe7 1300 }
da84f473
PA
1301
1302 errno = 0;
b8e1b30e 1303 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
da84f473 1304 if (debug_threads)
ce9e3fe7
PA
1305 {
1306 int save_errno = errno;
1307
c058728c
SM
1308 threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
1309 target_pid_to_str (ptid_of (thr)).c_str (),
1310 save_errno ? safe_strerror (save_errno) : "OK");
ce9e3fe7 1311 }
da84f473
PA
1312}
1313
e76126e8
PA
1314/* Kill LWP and wait for it to die. */
1315
1316static void
1317kill_wait_lwp (struct lwp_info *lwp)
1318{
1319 struct thread_info *thr = get_lwp_thread (lwp);
e99b03dc 1320 int pid = ptid_of (thr).pid ();
e38504b3 1321 int lwpid = ptid_of (thr).lwp ();
e76126e8
PA
1322 int wstat;
1323 int res;
1324
c058728c 1325 threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);
e76126e8
PA
1326
1327 do
1328 {
1329 linux_kill_one_lwp (lwp);
1330
1331 /* Make sure it died. Notes:
1332
1333 - The loop is most likely unnecessary.
1334
d16f3f6c 1335 - We don't use wait_for_event as that could delete lwps
e76126e8
PA
1336 while we're iterating over them. We're not interested in
1337 any pending status at this point, only in making sure all
1338 wait status on the kernel side are collected until the
1339 process is reaped.
1340
1341 - We don't use __WALL here as the __WALL emulation relies on
1342 SIGCHLD, and killing a stopped process doesn't generate
1343 one, nor an exit status.
1344 */
1345 res = my_waitpid (lwpid, &wstat, 0);
1346 if (res == -1 && errno == ECHILD)
1347 res = my_waitpid (lwpid, &wstat, __WCLONE);
1348 } while (res > 0 && WIFSTOPPED (wstat));
1349
586b02a9
PA
1350 /* Even if it was stopped, the child may have already disappeared.
1351 E.g., if it was killed by SIGKILL. */
1352 if (res < 0 && errno != ECHILD)
1353 perror_with_name ("kill_wait_lwp");
e76126e8
PA
1354}
1355
578290ec 1356/* Callback for `for_each_thread'. Kills an lwp of a given process,
da84f473 1357 except the leader. */
95954743 1358
578290ec
SM
1359static void
1360kill_one_lwp_callback (thread_info *thread, int pid)
da6d8c04 1361{
54a0b537 1362 struct lwp_info *lwp = get_thread_lwp (thread);
0d62e5e8 1363
fd500816
DJ
1364 /* We avoid killing the first thread here, because of a Linux kernel (at
1365 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1366 the children get a chance to be reaped, it will remain a zombie
1367 forever. */
95954743 1368
d86d4aaf 1369 if (lwpid_of (thread) == pid)
95954743 1370 {
c058728c
SM
1371 threads_debug_printf ("is last of process %s",
1372 target_pid_to_str (thread->id).c_str ());
578290ec 1373 return;
95954743 1374 }
fd500816 1375
e76126e8 1376 kill_wait_lwp (lwp);
da6d8c04
DJ
1377}
1378
c6885a57
TBA
1379int
1380linux_process_target::kill (process_info *process)
0d62e5e8 1381{
a780ef4f 1382 int pid = process->pid;
9d606399 1383
f9e39928
PA
1384 /* If we're killing a running inferior, make sure it is stopped
1385 first, as PTRACE_KILL will not work otherwise. */
7984d532 1386 stop_all_lwps (0, NULL);
f9e39928 1387
578290ec
SM
1388 for_each_thread (pid, [&] (thread_info *thread)
1389 {
1390 kill_one_lwp_callback (thread, pid);
1391 });
fd500816 1392
54a0b537 1393 /* See the comment in linux_kill_one_lwp. We did not kill the first
fd500816 1394 thread in the list, so do so now. */
a780ef4f 1395 lwp_info *lwp = find_lwp_pid (ptid_t (pid));
bd99dc85 1396
784867a5 1397 if (lwp == NULL)
c058728c 1398 threads_debug_printf ("cannot find lwp for pid: %d", pid);
784867a5 1399 else
e76126e8 1400 kill_wait_lwp (lwp);
2d717e4f 1401
8adb37b9 1402 mourn (process);
f9e39928
PA
1403
1404 /* Since we presently can only stop all lwps of all processes, we
1405 need to unstop lwps of other processes. */
7984d532 1406 unstop_all_lwps (0, NULL);
95954743 1407 return 0;
0d62e5e8
DJ
1408}
1409
9b224c5e
PA
1410/* Get pending signal of THREAD, for detaching purposes. This is the
1411 signal the thread last stopped for, which we need to deliver to the
1412 thread when detaching, otherwise, it'd be suppressed/lost. */
1413
1414static int
1415get_detach_signal (struct thread_info *thread)
1416{
c12a5089 1417 client_state &cs = get_client_state ();
a493e3e2 1418 enum gdb_signal signo = GDB_SIGNAL_0;
9b224c5e
PA
1419 int status;
1420 struct lwp_info *lp = get_thread_lwp (thread);
1421
1422 if (lp->status_pending_p)
1423 status = lp->status_pending;
1424 else
1425 {
1426 /* If the thread had been suspended by gdbserver, and it stopped
1427 cleanly, then it'll have stopped with SIGSTOP. But we don't
1428 want to deliver that SIGSTOP. */
183be222
SM
1429 if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
1430 || thread->last_status.sig () == GDB_SIGNAL_0)
9b224c5e
PA
1431 return 0;
1432
1433 /* Otherwise, we may need to deliver the signal we
1434 intercepted. */
1435 status = lp->last_status;
1436 }
1437
1438 if (!WIFSTOPPED (status))
1439 {
c058728c
SM
1440 threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
1441 target_pid_to_str (ptid_of (thread)).c_str ());
9b224c5e
PA
1442 return 0;
1443 }
1444
1445 /* Extended wait statuses aren't real SIGTRAPs. */
89a5711c 1446 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
9b224c5e 1447 {
c058728c
SM
1448 threads_debug_printf ("lwp %s had stopped with extended "
1449 "status: no pending signal",
1450 target_pid_to_str (ptid_of (thread)).c_str ());
9b224c5e
PA
1451 return 0;
1452 }
1453
2ea28649 1454 signo = gdb_signal_from_host (WSTOPSIG (status));
9b224c5e 1455
c12a5089 1456 if (cs.program_signals_p && !cs.program_signals[signo])
9b224c5e 1457 {
c058728c
SM
1458 threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
1459 target_pid_to_str (ptid_of (thread)).c_str (),
1460 gdb_signal_to_string (signo));
9b224c5e
PA
1461 return 0;
1462 }
c12a5089 1463 else if (!cs.program_signals_p
9b224c5e
PA
1464 /* If we have no way to know which signals GDB does not
1465 want to have passed to the program, assume
1466 SIGTRAP/SIGINT, which is GDB's default. */
a493e3e2 1467 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
9b224c5e 1468 {
c058728c
SM
1469 threads_debug_printf ("lwp %s had signal %s, "
1470 "but we don't know if we should pass it. "
1471 "Default to not.",
1472 target_pid_to_str (ptid_of (thread)).c_str (),
1473 gdb_signal_to_string (signo));
9b224c5e
PA
1474 return 0;
1475 }
1476 else
1477 {
c058728c
SM
1478 threads_debug_printf ("lwp %s has pending signal %s: delivering it",
1479 target_pid_to_str (ptid_of (thread)).c_str (),
1480 gdb_signal_to_string (signo));
9b224c5e
PA
1481
1482 return WSTOPSIG (status);
1483 }
1484}
1485
fd000fb3
TBA
1486void
1487linux_process_target::detach_one_lwp (lwp_info *lwp)
6ad8ae5c 1488{
ced2dffb 1489 struct thread_info *thread = get_lwp_thread (lwp);
9b224c5e 1490 int sig;
ced2dffb 1491 int lwpid;
6ad8ae5c 1492
9b224c5e 1493 /* If there is a pending SIGSTOP, get rid of it. */
54a0b537 1494 if (lwp->stop_expected)
ae13219e 1495 {
c058728c
SM
1496 threads_debug_printf ("Sending SIGCONT to %s",
1497 target_pid_to_str (ptid_of (thread)).c_str ());
9b224c5e 1498
d86d4aaf 1499 kill_lwp (lwpid_of (thread), SIGCONT);
54a0b537 1500 lwp->stop_expected = 0;
ae13219e
DJ
1501 }
1502
9b224c5e
PA
1503 /* Pass on any pending signal for this thread. */
1504 sig = get_detach_signal (thread);
1505
ced2dffb
PA
1506 /* Preparing to resume may try to write registers, and fail if the
1507 lwp is zombie. If that happens, ignore the error. We'll handle
1508 it below, when detach fails with ESRCH. */
a70b8144 1509 try
ced2dffb
PA
1510 {
1511 /* Flush any pending changes to the process's registers. */
1512 regcache_invalidate_thread (thread);
1513
1514 /* Finally, let it resume. */
d7599cc0 1515 low_prepare_to_resume (lwp);
ced2dffb 1516 }
230d2906 1517 catch (const gdb_exception_error &ex)
ced2dffb
PA
1518 {
1519 if (!check_ptrace_stopped_lwp_gone (lwp))
eedc3f4f 1520 throw;
ced2dffb 1521 }
ced2dffb
PA
1522
1523 lwpid = lwpid_of (thread);
1524 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
b8e1b30e 1525 (PTRACE_TYPE_ARG4) (long) sig) < 0)
ced2dffb
PA
1526 {
1527 int save_errno = errno;
1528
1529 /* We know the thread exists, so ESRCH must mean the lwp is
1530 zombie. This can happen if one of the already-detached
1531 threads exits the whole thread group. In that case we're
1532 still attached, and must reap the lwp. */
1533 if (save_errno == ESRCH)
1534 {
1535 int ret, status;
1536
1537 ret = my_waitpid (lwpid, &status, __WALL);
1538 if (ret == -1)
1539 {
1540 warning (_("Couldn't reap LWP %d while detaching: %s"),
6d91ce9a 1541 lwpid, safe_strerror (errno));
ced2dffb
PA
1542 }
1543 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1544 {
1545 warning (_("Reaping LWP %d while detaching "
1546 "returned unexpected status 0x%x"),
1547 lwpid, status);
1548 }
1549 }
1550 else
1551 {
1552 error (_("Can't detach %s: %s"),
61d7f128 1553 target_pid_to_str (ptid_of (thread)).c_str (),
6d91ce9a 1554 safe_strerror (save_errno));
ced2dffb
PA
1555 }
1556 }
c058728c
SM
1557 else
1558 threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
1559 target_pid_to_str (ptid_of (thread)).c_str (),
1560 strsignal (sig));
bd99dc85
PA
1561
1562 delete_lwp (lwp);
ced2dffb
PA
1563}
1564
9061c9cf
TBA
1565int
1566linux_process_target::detach (process_info *process)
95954743 1567{
ced2dffb 1568 struct lwp_info *main_lwp;
95954743 1569
863d01bd
PA
1570 /* As there's a step over already in progress, let it finish first,
1571 otherwise nesting a stabilize_threads operation on top gets real
1572 messy. */
1573 complete_ongoing_step_over ();
1574
f9e39928 1575 /* Stop all threads before detaching. First, ptrace requires that
30baf67b 1576 the thread is stopped to successfully detach. Second, thread_db
f9e39928
PA
1577 may need to uninstall thread event breakpoints from memory, which
1578 only works with a stopped process anyway. */
7984d532 1579 stop_all_lwps (0, NULL);
f9e39928 1580
ca5c370d 1581#ifdef USE_THREAD_DB
8336d594 1582 thread_db_detach (process);
ca5c370d
PA
1583#endif
1584
fa593d66 1585 /* Stabilize threads (move out of jump pads). */
5c9eb2f2 1586 target_stabilize_threads ();
fa593d66 1587
ced2dffb
PA
1588 /* Detach from the clone lwps first. If the thread group exits just
1589 while we're detaching, we must reap the clone lwps before we're
1590 able to reap the leader. */
fd000fb3
TBA
1591 for_each_thread (process->pid, [this] (thread_info *thread)
1592 {
1593 /* We don't actually detach from the thread group leader just yet.
1594 If the thread group exits, we must reap the zombie clone lwps
1595 before we're able to reap the leader. */
1596 if (thread->id.pid () == thread->id.lwp ())
1597 return;
1598
1599 lwp_info *lwp = get_thread_lwp (thread);
1600 detach_one_lwp (lwp);
1601 });
ced2dffb 1602
ef2ddb33 1603 main_lwp = find_lwp_pid (ptid_t (process->pid));
59487af3 1604 gdb_assert (main_lwp != nullptr);
fd000fb3 1605 detach_one_lwp (main_lwp);
8336d594 1606
8adb37b9 1607 mourn (process);
f9e39928
PA
1608
1609 /* Since we presently can only stop all lwps of all processes, we
1610 need to unstop lwps of other processes. */
7984d532 1611 unstop_all_lwps (0, NULL);
f9e39928
PA
1612 return 0;
1613}
1614
1615/* Remove all LWPs that belong to process PROC from the lwp list. */
1616
8adb37b9
TBA
1617void
1618linux_process_target::mourn (process_info *process)
8336d594 1619{
8336d594
PA
1620#ifdef USE_THREAD_DB
1621 thread_db_mourn (process);
1622#endif
1623
fd000fb3 1624 for_each_thread (process->pid, [this] (thread_info *thread)
6b2a85da
SM
1625 {
1626 delete_lwp (get_thread_lwp (thread));
1627 });
f9e39928 1628
f551c8ef 1629 this->remove_linux_process (process);
8336d594
PA
1630}
1631
95a49a39
TBA
1632void
1633linux_process_target::join (int pid)
444d6139 1634{
444d6139
PA
1635 int status, ret;
1636
1637 do {
d105de22 1638 ret = my_waitpid (pid, &status, 0);
444d6139
PA
1639 if (WIFEXITED (status) || WIFSIGNALED (status))
1640 break;
1641 } while (ret != -1 || errno != ECHILD);
1642}
1643
13d3d99b
TBA
1644/* Return true if the given thread is still alive. */
1645
1646bool
1647linux_process_target::thread_alive (ptid_t ptid)
0d62e5e8 1648{
95954743
PA
1649 struct lwp_info *lwp = find_lwp_pid (ptid);
1650
1651 /* We assume we always know if a thread exits. If a whole process
1652 exited but we still haven't been able to report it to GDB, we'll
1653 hold on to the last lwp of the dead process. */
1654 if (lwp != NULL)
00db26fa 1655 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1656 else
1657 return 0;
1658}
1659
df95181f
TBA
1660bool
1661linux_process_target::thread_still_has_status_pending (thread_info *thread)
582511be
PA
1662{
1663 struct lwp_info *lp = get_thread_lwp (thread);
1664
1665 if (!lp->status_pending_p)
1666 return 0;
1667
582511be 1668 if (thread->last_resume_kind != resume_stop
15c66dd6
PA
1669 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1670 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
582511be 1671 {
582511be
PA
1672 CORE_ADDR pc;
1673 int discard = 0;
1674
1675 gdb_assert (lp->last_status != 0);
1676
1677 pc = get_pc (lp);
1678
24583e45
TBA
1679 scoped_restore_current_thread restore_thread;
1680 switch_to_thread (thread);
582511be
PA
1681
1682 if (pc != lp->stop_pc)
1683 {
c058728c
SM
1684 threads_debug_printf ("PC of %ld changed",
1685 lwpid_of (thread));
582511be
PA
1686 discard = 1;
1687 }
3e572f71 1688
582511be
PA
1689 if (discard)
1690 {
c058728c 1691 threads_debug_printf ("discarding pending breakpoint status");
582511be
PA
1692 lp->status_pending_p = 0;
1693 return 0;
1694 }
1695 }
1696
1697 return 1;
1698}
1699
a681f9c9
PA
1700/* Returns true if LWP is resumed from the client's perspective. */
1701
1702static int
1703lwp_resumed (struct lwp_info *lwp)
1704{
1705 struct thread_info *thread = get_lwp_thread (lwp);
1706
1707 if (thread->last_resume_kind != resume_stop)
1708 return 1;
1709
1710 /* Did gdb send us a `vCont;t', but we haven't reported the
1711 corresponding stop to gdb yet? If so, the thread is still
1712 resumed/running from gdb's perspective. */
1713 if (thread->last_resume_kind == resume_stop
183be222 1714 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
a681f9c9
PA
1715 return 1;
1716
1717 return 0;
1718}
1719
df95181f
TBA
1720bool
1721linux_process_target::status_pending_p_callback (thread_info *thread,
1722 ptid_t ptid)
0d62e5e8 1723{
582511be 1724 struct lwp_info *lp = get_thread_lwp (thread);
95954743
PA
1725
1726 /* Check if we're only interested in events from a specific process
afa8d396 1727 or a specific LWP. */
83e1b6c1 1728 if (!thread->id.matches (ptid))
95954743 1729 return 0;
0d62e5e8 1730
a681f9c9
PA
1731 if (!lwp_resumed (lp))
1732 return 0;
1733
582511be 1734 if (lp->status_pending_p
df95181f 1735 && !thread_still_has_status_pending (thread))
582511be 1736 {
df95181f 1737 resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
582511be
PA
1738 return 0;
1739 }
0d62e5e8 1740
582511be 1741 return lp->status_pending_p;
0d62e5e8
DJ
1742}
1743
95954743
PA
1744struct lwp_info *
1745find_lwp_pid (ptid_t ptid)
1746{
d4895ba2
SM
1747 long lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1748 thread_info *thread = find_thread ([lwp] (thread_info *thr_arg)
454296a2 1749 {
da4ae14a 1750 return thr_arg->id.lwp () == lwp;
454296a2 1751 });
d86d4aaf
DE
1752
1753 if (thread == NULL)
1754 return NULL;
1755
9c80ecd6 1756 return get_thread_lwp (thread);
95954743
PA
1757}
1758
fa96cb38 1759/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1760
fa96cb38
PA
1761static int
1762num_lwps (int pid)
1763{
fa96cb38 1764 int count = 0;
0d62e5e8 1765
4d3bb80e
SM
1766 for_each_thread (pid, [&] (thread_info *thread)
1767 {
9c80ecd6 1768 count++;
4d3bb80e 1769 });
3aee8918 1770
fa96cb38
PA
1771 return count;
1772}
d61ddec4 1773
6d4ee8c6
GB
1774/* See nat/linux-nat.h. */
1775
1776struct lwp_info *
1777iterate_over_lwps (ptid_t filter,
d3a70e03 1778 gdb::function_view<iterate_over_lwps_ftype> callback)
6d4ee8c6 1779{
da4ae14a 1780 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
6d1e5673 1781 {
da4ae14a 1782 lwp_info *lwp = get_thread_lwp (thr_arg);
6d1e5673 1783
d3a70e03 1784 return callback (lwp);
6d1e5673 1785 });
6d4ee8c6 1786
9c80ecd6 1787 if (thread == NULL)
6d4ee8c6
GB
1788 return NULL;
1789
9c80ecd6 1790 return get_thread_lwp (thread);
6d4ee8c6
GB
1791}
1792
e8a625d1 1793bool
fd000fb3 1794linux_process_target::check_zombie_leaders ()
fa96cb38 1795{
e8a625d1
PA
1796 bool new_pending_event = false;
1797
1798 for_each_process ([&] (process_info *proc)
aa40a989
PA
1799 {
1800 pid_t leader_pid = pid_of (proc);
1801 lwp_info *leader_lp = find_lwp_pid (ptid_t (leader_pid));
1802
1803 threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1804 "num_lwps=%d, zombie=%d",
1805 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1806 linux_proc_pid_is_zombie (leader_pid));
1807
1808 if (leader_lp != NULL && !leader_lp->stopped
1809 /* Check if there are other threads in the group, as we may
8a841a35
PA
1810 have raced with the inferior simply exiting. Note this
1811 isn't a watertight check. If the inferior is
1812 multi-threaded and is exiting, it may be we see the
1813 leader as zombie before we reap all the non-leader
1814 threads. See comments below. */
aa40a989
PA
1815 && !last_thread_of_process_p (leader_pid)
1816 && linux_proc_pid_is_zombie (leader_pid))
1817 {
8a841a35
PA
1818 /* A zombie leader in a multi-threaded program can mean one
1819 of three things:
1820
1821 #1 - Only the leader exited, not the whole program, e.g.,
1822 with pthread_exit. Since we can't reap the leader's exit
1823 status until all other threads are gone and reaped too,
1824 we want to delete the zombie leader right away, as it
1825 can't be debugged, we can't read its registers, etc.
1826 This is the main reason we check for zombie leaders
1827 disappearing.
1828
1829 #2 - The whole thread-group/process exited (a group exit,
1830 via e.g. exit(3), and there is (or will be shortly) an
1831 exit reported for each thread in the process, and then
1832 finally an exit for the leader once the non-leaders are
1833 reaped.
1834
1835 #3 - There are 3 or more threads in the group, and a
1836 thread other than the leader exec'd. See comments on
1837 exec events at the top of the file.
1838
1839 Ideally we would never delete the leader for case #2.
1840 Instead, we want to collect the exit status of each
1841 non-leader thread, and then finally collect the exit
1842 status of the leader as normal and use its exit code as
1843 whole-process exit code. Unfortunately, there's no
1844 race-free way to distinguish cases #1 and #2. We can't
1845 assume the exit events for the non-leaders threads are
1846 already pending in the kernel, nor can we assume the
1847 non-leader threads are in zombie state already. Between
1848 the leader becoming zombie and the non-leaders exiting
1849 and becoming zombie themselves, there's a small time
1850 window, so such a check would be racy. Temporarily
1851 pausing all threads and checking to see if all threads
1852 exit or not before re-resuming them would work in the
1853 case that all threads are running right now, but it
1854 wouldn't work if some thread is currently already
1855 ptrace-stopped, e.g., due to scheduler-locking.
1856
1857 So what we do is we delete the leader anyhow, and then
1858 later on when we see its exit status, we re-add it back.
1859 We also make sure that we only report a whole-process
1860 exit when we see the leader exiting, as opposed to when
1861 the last LWP in the LWP list exits, which can be a
1862 non-leader if we deleted the leader here. */
aa40a989 1863 threads_debug_printf ("Thread group leader %d zombie "
8a841a35
PA
1864 "(it exited, or another thread execd), "
1865 "deleting it.",
aa40a989 1866 leader_pid);
e8a625d1
PA
1867
1868 thread_info *leader_thread = get_lwp_thread (leader_lp);
1869 if (report_exit_events_for (leader_thread))
1870 {
1871 mark_lwp_dead (leader_lp, W_EXITCODE (0, 0), true);
1872 new_pending_event = true;
1873 }
1874 else
1875 delete_lwp (leader_lp);
aa40a989 1876 }
9179355e 1877 });
e8a625d1
PA
1878
1879 return new_pending_event;
fa96cb38 1880}
c3adc08c 1881
a1385b7b
SM
1882/* Callback for `find_thread'. Returns the first LWP that is not
1883 stopped. */
d50171e4 1884
a1385b7b
SM
1885static bool
1886not_stopped_callback (thread_info *thread, ptid_t filter)
fa96cb38 1887{
a1385b7b
SM
1888 if (!thread->id.matches (filter))
1889 return false;
47c0c975 1890
a1385b7b 1891 lwp_info *lwp = get_thread_lwp (thread);
fa96cb38 1892
a1385b7b 1893 return !lwp->stopped;
0d62e5e8 1894}
611cb4a5 1895
863d01bd
PA
1896/* Increment LWP's suspend count. */
1897
1898static void
1899lwp_suspended_inc (struct lwp_info *lwp)
1900{
1901 lwp->suspended++;
1902
c058728c
SM
1903 if (lwp->suspended > 4)
1904 threads_debug_printf
1905 ("LWP %ld has a suspiciously high suspend count, suspended=%d",
1906 lwpid_of (get_lwp_thread (lwp)), lwp->suspended);
863d01bd
PA
1907}
1908
1909/* Decrement LWP's suspend count. */
1910
1911static void
1912lwp_suspended_decr (struct lwp_info *lwp)
1913{
1914 lwp->suspended--;
1915
1916 if (lwp->suspended < 0)
1917 {
1918 struct thread_info *thread = get_lwp_thread (lwp);
1919
f34652de 1920 internal_error ("unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
863d01bd
PA
1921 lwp->suspended);
1922 }
1923}
1924
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  /* NOTE(review): also lets the tracepoint module handle any of its
     own breakpoints at this PC — confirm semantics in tracepoint.cc.  */
  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  /* Balance the increment above; the count must be back to zero.  */
  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads
	      || (lwp->collecting_fast_tracepoint
		  != fast_tpoint_collect_result::not_collecting));

  if (tpoint_related_event)
    {
      threads_debug_printf ("got a tracepoint event");
      return 1;
    }

  return 0;
}
1973
13e567af
TBA
1974fast_tpoint_collect_result
1975linux_process_target::linux_fast_tracepoint_collecting
1976 (lwp_info *lwp, fast_tpoint_collect_status *status)
fa593d66
PA
1977{
1978 CORE_ADDR thread_area;
d86d4aaf 1979 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66 1980
fa593d66
PA
1981 /* Get the thread area address. This is used to recognize which
1982 thread is which when tracing with the in-process agent library.
1983 We don't read anything from the address, and treat it as opaque;
1984 it's the address itself that we assume is unique per-thread. */
13e567af 1985 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
229d26fc 1986 return fast_tpoint_collect_result::not_collecting;
fa593d66
PA
1987
1988 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1989}
1990
/* Base implementation of the low_get_thread_area hook.  Returning -1
   signals that fetching LWPID's thread area address is not supported;
   callers (e.g. linux_fast_tracepoint_collecting) then treat the LWP
   as not collecting a fast tracepoint.  Architecture backends that
   support the in-process agent presumably override this — confirm in
   the linux-*-low.cc files.  */

int
linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  return -1;
}
1996
/* If LWP is stopped inside a fast tracepoint jump pad, arrange for it
   to be moved out before its stop (described by *WSTAT, which may be
   NULL) is reported to GDB.  Returns true if the LWP needs to finish
   moving out of the jump pad first (the caller should keep it
   running), false if the event can be reported now.  May rewrite
   *WSTAT's associated siginfo and the LWP's PC when a synchronous
   signal hit inside the relocated instruction.  */

bool
linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
{
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  /* Only relevant for non-SIGTRAP stops, and only when fast
     tracepoints are in use with the in-process agent loaded.  */
  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;

      threads_debug_printf
	("Checking whether LWP %ld needs to move out of the jump pad.",
	 lwpid_of (current_thread));

      fast_tpoint_collect_result r
	= linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != fast_tpoint_collect_result::not_collecting)
	    {
	      if (r == fast_tpoint_collect_result::before_insn
		  && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      threads_debug_printf
		("Checking whether LWP %ld needs to move out of the jump pad..."
		 " it does", lwpid_of (current_thread));

	      return true;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint
	    = fast_tpoint_collect_result::not_collecting;

	  if (r != fast_tpoint_collect_result::not_collecting
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      /* Point the PC back at the tracepoint address.  */
	      regcache = get_thread_regcache (current_thread, 1);
	      low_set_pc (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  /* The collection was cancelled; the exit-jump-pad
	     breakpoint (if set above on a previous pass) is no longer
	     needed.  Removing it requires briefly pausing all LWPs.  */
	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      threads_debug_printf
		("Cancelling fast exit-jump-pad: removing bkpt."
		 "stopping all threads momentarily.");

	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  threads_debug_printf
    ("Checking whether LWP %ld needs to move out of the jump pad... no",
     lwpid_of (current_thread));

  return false;
}
2113
/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  The wait status *WSTAT must describe a stop; the
   stopping signal and its siginfo are captured so they can be
   re-injected later by dequeue_one_deferred_signal.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  threads_debug_printf ("Deferring signal %d for LWP %ld.",
			WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      for (const auto &sig : lwp->pending_signals_to_report)
	threads_debug_printf (" Already queued %d", sig.signal);

      threads_debug_printf (" (no more currently queued signals)");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      for (const auto &sig : lwp->pending_signals_to_report)
	{
	  if (sig.signal == WSTOPSIG (*wstat))
	    {
	      threads_debug_printf
		("Not requeuing already queued non-RT signal %d for LWP %ld",
		 sig.signal, lwpid_of (thread));
	      return;
	    }
	}
    }

  lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));

  /* Fetch the signal's siginfo directly into the element just
     appended above.  */
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &lwp->pending_signals_to_report.back ().info);
}
2155
/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  On success, stores the corresponding stop
   status in *WSTAT, re-injects the saved siginfo into the kernel, and
   returns 1.  Returns 0 if the queue is empty.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (!lwp->pending_signals_to_report.empty ())
    {
      const pending_signal &p_sig = lwp->pending_signals_to_report.front ();

      *wstat = W_STOPCODE (p_sig.signal);
      /* si_signo == 0 means no siginfo was captured at enqueue time;
	 skip the kernel update in that case.  */
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals_to_report.pop_front ();

      threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
			    WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  for (const auto &sig : lwp->pending_signals_to_report)
	    threads_debug_printf (" Still queued %d", sig.signal);

	  threads_debug_printf (" (no more queued signals)");
	}

      return 1;
    }

  return 0;
}
2191
ac1bbaca
TBA
2192bool
2193linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
582511be 2194{
24583e45
TBA
2195 scoped_restore_current_thread restore_thread;
2196 switch_to_thread (get_lwp_thread (child));
d50171e4 2197
ac1bbaca
TBA
2198 if (low_stopped_by_watchpoint ())
2199 {
2200 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2201 child->stopped_data_address = low_stopped_data_address ();
2202 }
582511be 2203
ac1bbaca
TBA
2204 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2205}
d50171e4 2206
/* Base implementation of the low_stopped_by_watchpoint hook: with no
   architecture support, the inferior is never considered stopped by a
   watchpoint.  */

bool
linux_process_target::low_stopped_by_watchpoint ()
{
  return false;
}
d50171e4 2212
/* Base implementation of the low_stopped_data_address hook.  Only
   meaningful when low_stopped_by_watchpoint returns true; the base
   version returns 0 (no data address available).  */

CORE_ADDR
linux_process_target::low_stopped_data_address ()
{
  return 0;
}
2218
de0d863e
DB
2219/* Return the ptrace options that we want to try to enable. */
2220
2221static int
2222linux_low_ptrace_options (int attached)
2223{
c12a5089 2224 client_state &cs = get_client_state ();
de0d863e
DB
2225 int options = 0;
2226
2227 if (!attached)
2228 options |= PTRACE_O_EXITKILL;
2229
c12a5089 2230 if (cs.report_fork_events)
de0d863e
DB
2231 options |= PTRACE_O_TRACEFORK;
2232
c12a5089 2233 if (cs.report_vfork_events)
c269dbdb
DB
2234 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2235
c12a5089 2236 if (cs.report_exec_events)
94585166
DB
2237 options |= PTRACE_O_TRACEEXEC;
2238
82075af2
JS
2239 options |= PTRACE_O_TRACESYSGOOD;
2240
de0d863e
DB
2241 return options;
2242}
2243
/* Process one wait status WSTAT that waitpid reported for LWPID:
   attach it to the matching LWP, handling exits, extended events,
   syscall stops and expected SIGSTOPs.  Events that should not reach
   the core are consumed here; anything else is left pending on the
   LWP for later selection.  */

void
linux_process_target::filter_event (int lwpid, int wstat)
{
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (ptid_t (lwpid));

  /* Check for events reported by anything not in our LWP list.  */
  if (child == nullptr)
    {
      if (WIFSTOPPED (wstat))
	{
	  if (WSTOPSIG (wstat) == SIGTRAP
	      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
	    {
	      /* A non-leader thread exec'ed after we've seen the
		 leader zombie, and removed it from our lists (in
		 check_zombie_leaders).  The non-leader thread changes
		 its tid to the tgid.  */
	      threads_debug_printf
		("Re-adding thread group leader LWP %d after exec.",
		 lwpid);

	      child = add_lwp (ptid_t (lwpid, lwpid));
	      child->stopped = 1;
	      switch_to_thread (child->thread);
	    }
	  else
	    {
	      /* A process we are controlling has forked and the new
		 child's stop was reported to us by the kernel.  Save
		 its PID and go back to waiting for the fork event to
		 be reported - the stopped process might be returned
		 from waitpid before or after the fork event is.  */
	      threads_debug_printf
		("Saving LWP %d status %s in stopped_pids list",
		 lwpid, status_to_str (wstat).c_str ());
	      add_to_pid_list (&stopped_pids, lwpid, wstat);
	    }
	}
      else
	{
	  /* Don't report an event for the exit of an LWP not in our
	     list, i.e. not part of any inferior we're debugging.
	     This can happen if we detach from a program we originally
	     forked and then it exits.  However, note that we may have
	     earlier deleted a leader of an inferior we're debugging,
	     in check_zombie_leaders.  Re-add it back here if so.  */
	  find_process ([&] (process_info *proc)
	    {
	      if (proc->pid == lwpid)
		{
		  threads_debug_printf
		    ("Re-adding thread group leader LWP %d after exit.",
		     lwpid);

		  child = add_lwp (ptid_t (lwpid, lwpid));
		  return true;
		}
	      return false;
	    });
	}

      /* Still unknown after the special cases above: ignore it.  */
      if (child == nullptr)
	return;
    }

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      threads_debug_printf ("%d exited", lwpid);

      if (finish_step_over (child))
	{
	  /* Unsuspend all other LWPs, and set them back running again.  */
	  unsuspend_all_lwps (child);
	}

      /* If this is not the leader LWP, then the exit signal was not
	 the end of the debugged application and should be ignored,
	 unless GDB wants to hear about thread exits.  */
      if (report_exit_events_for (thread) || is_leader (thread))
	{
	  /* Since events are serialized to GDB core, and we can't
	     report this one right now.  Leave the status pending for
	     the next time we're able to report it.  */
	  mark_lwp_dead (child, wstat, false);
	  return;
	}
      else
	{
	  delete_lwp (child);
	  return;
	}
    }

  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return;
	    }
	}
    }

  /* First stop after enabling: install the requested ptrace event
     options on the new LWP.  */
  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Always update syscall_state, even if it will be filtered later.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    {
      /* Syscall stops come in entry/return pairs; toggle between the
	 two states.  */
      child->syscall_state
	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	   ? TARGET_WAITKIND_SYSCALL_RETURN
	   : TARGET_WAITKIND_SYSCALL_ENTRY);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in handle_extended_wait.  */
      child->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Be careful to not overwrite stop_pc until save_stop_reason is
     called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (&child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return;
	}
    }

  if (linux_wstatus_maybe_breakpoint (wstat))
    {
      if (save_stop_reason (child))
	have_stop_pc = 1;
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      threads_debug_printf ("Expected stop.");

      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
				target_pid_to_str (ptid_of (thread)).c_str ());
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
				target_pid_to_str (ptid_of (thread)).c_str ());
	  return;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
				child->stepping ? "step" : "continue",
				target_pid_to_str (ptid_of (thread)).c_str ());

	  resume_one_lwp (child, child->stepping, 0, NULL);
	  return;
	}
    }

  /* Leave the event pending on this LWP for later reporting.  */
  child->status_pending_p = 1;
  child->status_pending = wstat;
  return;
}
2462
b31cdfa6
TBA
2463bool
2464linux_process_target::maybe_hw_step (thread_info *thread)
f79b145d 2465{
b31cdfa6
TBA
2466 if (supports_hardware_single_step ())
2467 return true;
f79b145d
YQ
2468 else
2469 {
3b9a79ef 2470 /* GDBserver must insert single-step breakpoint for software
f79b145d 2471 single step. */
3b9a79ef 2472 gdb_assert (has_single_step_breakpoints (thread));
b31cdfa6 2473 return false;
f79b145d
YQ
2474 }
2475}
2476
df95181f
TBA
2477void
2478linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
20ba1ce6 2479{
20ba1ce6
PA
2480 struct lwp_info *lp = get_thread_lwp (thread);
2481
2482 if (lp->stopped
863d01bd 2483 && !lp->suspended
20ba1ce6 2484 && !lp->status_pending_p
183be222 2485 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
20ba1ce6 2486 {
8901d193
YQ
2487 int step = 0;
2488
2489 if (thread->last_resume_kind == resume_step)
b6d8d612
KB
2490 {
2491 if (supports_software_single_step ())
2492 install_software_single_step_breakpoints (lp);
2493
2494 step = maybe_hw_step (thread);
2495 }
20ba1ce6 2496
c058728c
SM
2497 threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
2498 target_pid_to_str (ptid_of (thread)).c_str (),
2499 paddress (lp->stop_pc), step);
20ba1ce6 2500
df95181f 2501 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
20ba1ce6
PA
2502 }
2503}
2504
/* Wait for an event from any LWP matching FILTER_PTID (or from the
   specific LWP FILTER_PTID names).  WAIT_PTID is the broader set we
   are conceptually waiting on; it is only used to decide whether any
   unwaited-for LWP remains.  Stores the wait status in *WSTATP and
   returns the event LWP's id, -1 when no resumed child is left to
   wait for, or 0 when OPTIONS contains WNOHANG and no event is
   pending.  */

int
linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
					       ptid_t filter_ptid,
					       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
    {
      /* Wildcard or whole-process filter: pick a pending LWP at
	 random to avoid starvation.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  threads_debug_printf ("Got a pending child %ld", lwpid_of (event_thread));
	}
    }
  else if (filter_ptid != null_ptid)
    {
      /* Waiting on one specific LWP.  */
      requested_child = find_lwp_pid (filter_ptid);
      gdb_assert (requested_child != nullptr);

      /* If it is mid fast-tracepoint collection, defer its pending
	 signal and let it finish the collection first.  */
      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && (requested_child->collecting_fast_tracepoint
	      != fast_tpoint_collect_result::not_collecting))
	{
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error ("requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      threads_debug_printf ("Got an event from pending child %ld (%04x)",
			    lwpid_of (event_thread),
			    event_child->status_pending);

      /* Consume the pending status and report it.  */
      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      switch_to_thread (event_thread);
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
			    ret, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  threads_debug_printf ("waitpid %ld received %s",
				(long) ret, status_to_str (*wstatp).c_str ());

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_thread ([this] (thread_info *thread)
	  {
	    resume_stopped_resumed_lwps (thread);
	  });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      if (check_zombie_leaders ())
	goto retry;

      auto not_stopped = [&] (thread_info *thread)
	{
	  return not_stopped_callback (thread, wait_ptid);
	};

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if (find_thread (not_stopped) == NULL)
	{
	  threads_debug_printf ("exit (no unwaited-for LWP)");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  threads_debug_printf ("WNOHANG set, no event found");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      threads_debug_printf ("sigsuspend'ing");

      sigsuspend (&prev_mask);
      gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  /* Restore the original signal mask before reporting.  */
  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);

  switch_to_thread (event_thread);

  return lwpid_of (event_thread);
}
2703
/* Wait for an event from any LWP matching PTID.  Convenience wrapper
   around wait_for_event_filtered using PTID as both the wait and the
   filter set.  */

int
linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  return wait_for_event_filtered (ptid, ptid, wstatp, options);
}
2709
/* Select one LWP out of those that have events pending and store it
   in *ORIG_LP.  *ORIG_LP is left unchanged if no candidate with a
   pending event is found.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  struct thread_info *event_thread = NULL;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it's the LWP that
     the core is most interested in.  If we didn't do this, then we'd
     have to handle pending step SIGTRAPs somehow in case the core
     later continues the previously-stepped thread, otherwise we'd
     report the pending SIGTRAP, and the core, not having stepped the
     thread, wouldn't understand what the trap was for, and therefore
     would report it to the user as a random signal.  */
  if (!non_stop)
    {
      event_thread = find_thread ([] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
		  && thread->last_resume_kind == resume_step
		  && lp->status_pending_p);
	});

      if (event_thread != NULL)
	threads_debug_printf
	  ("Select single-step %s",
	   target_pid_to_str (ptid_of (event_thread)).c_str ());
    }
  if (event_thread == NULL)
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had events.  */

      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  /* Only resumed LWPs that have an event pending.  */
	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
		  && lp->status_pending_p);
	});
    }

  if (event_thread != NULL)
    {
      struct lwp_info *event_lp = get_thread_lwp (event_thread);

      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
2764
7984d532
PA
2765/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2766 NULL. */
2767
2768static void
2769unsuspend_all_lwps (struct lwp_info *except)
2770{
139720c5
SM
2771 for_each_thread ([&] (thread_info *thread)
2772 {
2773 lwp_info *lwp = get_thread_lwp (thread);
2774
2775 if (lwp != except)
2776 lwp_suspended_decr (lwp);
2777 });
7984d532
PA
2778}
2779
5a6b0a41 2780static bool lwp_running (thread_info *thread);
fa593d66
PA
2781
2782/* Stabilize threads (move out of jump pads).
2783
2784 If a thread is midway collecting a fast tracepoint, we need to
2785 finish the collection and move it out of the jump pad before
2786 reporting the signal.
2787
2788 This avoids recursion while collecting (when a signal arrives
2789 midway, and the signal handler itself collects), which would trash
2790 the trace buffer. In case the user set a breakpoint in a signal
2791 handler, this avoids the backtrace showing the jump pad, etc..
2792 Most importantly, there are certain things we can't do safely if
2793 threads are stopped in a jump pad (or in its callee's). For
2794 example:
2795
2796 - starting a new trace run. A thread still collecting the
2797 previous run, could trash the trace buffer when resumed. The trace
2798 buffer control structures would have been reset but the thread had
2799 no way to tell. The thread could even midway memcpy'ing to the
2800 buffer, which would mean that when resumed, it would clobber the
2801 trace buffer that had been set for a new run.
2802
2803 - we can't rewrite/reuse the jump pads for new tracepoints
2804 safely. Say you do tstart while a thread is stopped midway while
2805 collecting. When the thread is later resumed, it finishes the
2806 collection, and returns to the jump pad, to execute the original
2807 instruction that was under the tracepoint jump at the time the
2808 older run had been started. If the jump pad had been rewritten
2809 since for something else in the new run, the thread would now
2810 execute the wrong / random instructions. */
2811
5c9eb2f2
TBA
2812void
2813linux_process_target::stabilize_threads ()
fa593d66 2814{
13e567af
TBA
2815 thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
2816 {
2817 return stuck_in_jump_pad (thread);
2818 });
fa593d66 2819
d86d4aaf 2820 if (thread_stuck != NULL)
fa593d66 2821 {
c058728c
SM
2822 threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
2823 lwpid_of (thread_stuck));
fa593d66
PA
2824 return;
2825 }
2826
24583e45 2827 scoped_restore_current_thread restore_thread;
fa593d66
PA
2828
2829 stabilizing_threads = 1;
2830
2831 /* Kick 'em all. */
d16f3f6c
TBA
2832 for_each_thread ([this] (thread_info *thread)
2833 {
2834 move_out_of_jump_pad (thread);
2835 });
fa593d66
PA
2836
2837 /* Loop until all are stopped out of the jump pads. */
5a6b0a41 2838 while (find_thread (lwp_running) != NULL)
fa593d66
PA
2839 {
2840 struct target_waitstatus ourstatus;
2841 struct lwp_info *lwp;
fa593d66
PA
2842 int wstat;
2843
2844 /* Note that we go through the full wait even loop. While
2845 moving threads out of jump pad, we need to be able to step
2846 over internal breakpoints and such. */
d16f3f6c 2847 wait_1 (minus_one_ptid, &ourstatus, 0);
fa593d66 2848
183be222 2849 if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
fa593d66 2850 {
0bfdf32f 2851 lwp = get_thread_lwp (current_thread);
fa593d66
PA
2852
2853 /* Lock it. */
863d01bd 2854 lwp_suspended_inc (lwp);
fa593d66 2855
183be222 2856 if (ourstatus.sig () != GDB_SIGNAL_0
0bfdf32f 2857 || current_thread->last_resume_kind == resume_stop)
fa593d66 2858 {
183be222 2859 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
fa593d66
PA
2860 enqueue_one_deferred_signal (lwp, &wstat);
2861 }
2862 }
2863 }
2864
fcdad592 2865 unsuspend_all_lwps (NULL);
fa593d66
PA
2866
2867 stabilizing_threads = 0;
2868
b4d51a55 2869 if (debug_threads)
fa593d66 2870 {
13e567af
TBA
2871 thread_stuck = find_thread ([this] (thread_info *thread)
2872 {
2873 return stuck_in_jump_pad (thread);
2874 });
fcb056a5 2875
d86d4aaf 2876 if (thread_stuck != NULL)
c058728c
SM
2877 threads_debug_printf
2878 ("couldn't stabilize, LWP %ld got stuck in jump pad",
2879 lwpid_of (thread_stuck));
fa593d66
PA
2880 }
2881}
2882
582511be
PA
2883/* Convenience function that is called when the kernel reports an
2884 event that is not passed out to GDB. */
2885
2886static ptid_t
2887ignore_event (struct target_waitstatus *ourstatus)
2888{
2889 /* If we got an event, there may still be others, as a single
2890 SIGCHLD can indicate more than one child stopped. This forces
2891 another target_wait call. */
2892 async_file_mark ();
2893
183be222 2894 ourstatus->set_ignore ();
582511be
PA
2895 return null_ptid;
2896}
2897
fd000fb3
TBA
2898ptid_t
2899linux_process_target::filter_exit_event (lwp_info *event_child,
2900 target_waitstatus *ourstatus)
65706a29
PA
2901{
2902 struct thread_info *thread = get_lwp_thread (event_child);
2903 ptid_t ptid = ptid_of (thread);
2904
e8a625d1
PA
2905 if (ourstatus->kind () == TARGET_WAITKIND_THREAD_EXITED)
2906 {
2907 /* We're reporting a thread exit for the leader. The exit was
2908 detected by check_zombie_leaders. */
2909 gdb_assert (is_leader (thread));
2910 gdb_assert (report_exit_events_for (thread));
2911
2912 delete_lwp (event_child);
2913 return ptid;
2914 }
2915
48989498
PA
2916 /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
2917 if a non-leader thread exits with a signal, we'd report it to the
2918 core which would interpret it as the whole-process exiting.
2919 There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind. */
2920 if (ourstatus->kind () != TARGET_WAITKIND_EXITED
2921 && ourstatus->kind () != TARGET_WAITKIND_SIGNALLED)
2922 return ptid;
2923
8a841a35 2924 if (!is_leader (thread))
65706a29 2925 {
48989498 2926 if (report_exit_events_for (thread))
183be222 2927 ourstatus->set_thread_exited (0);
65706a29 2928 else
183be222 2929 ourstatus->set_ignore ();
65706a29
PA
2930
2931 delete_lwp (event_child);
2932 }
2933 return ptid;
2934}
2935
82075af2
JS
2936/* Returns 1 if GDB is interested in any event_child syscalls. */
2937
2938static int
2939gdb_catching_syscalls_p (struct lwp_info *event_child)
2940{
2941 struct thread_info *thread = get_lwp_thread (event_child);
2942 struct process_info *proc = get_thread_process (thread);
2943
f27866ba 2944 return !proc->syscalls_to_catch.empty ();
82075af2
JS
2945}
2946
9eedd27d
TBA
2947bool
2948linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
82075af2 2949{
4cc32bec 2950 int sysno;
82075af2
JS
2951 struct thread_info *thread = get_lwp_thread (event_child);
2952 struct process_info *proc = get_thread_process (thread);
2953
f27866ba 2954 if (proc->syscalls_to_catch.empty ())
9eedd27d 2955 return false;
82075af2 2956
f27866ba 2957 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
9eedd27d 2958 return true;
82075af2 2959
4cc32bec 2960 get_syscall_trapinfo (event_child, &sysno);
f27866ba
SM
2961
2962 for (int iter : proc->syscalls_to_catch)
82075af2 2963 if (iter == sysno)
9eedd27d 2964 return true;
82075af2 2965
9eedd27d 2966 return false;
82075af2
JS
2967}
2968
d16f3f6c
TBA
2969ptid_t
2970linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
b60cea74 2971 target_wait_flags target_options)
da6d8c04 2972{
c058728c
SM
2973 THREADS_SCOPED_DEBUG_ENTER_EXIT;
2974
c12a5089 2975 client_state &cs = get_client_state ();
e5f1222d 2976 int w;
fc7238bb 2977 struct lwp_info *event_child;
bd99dc85 2978 int options;
bd99dc85 2979 int pid;
6bf5e0ba
PA
2980 int step_over_finished;
2981 int bp_explains_trap;
2982 int maybe_internal_trap;
2983 int report_to_gdb;
219f2f23 2984 int trace_event;
c2d6af84 2985 int in_step_range;
bd99dc85 2986
c058728c 2987 threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());
87ce2a04 2988
bd99dc85
PA
2989 /* Translate generic target options into linux options. */
2990 options = __WALL;
2991 if (target_options & TARGET_WNOHANG)
2992 options |= WNOHANG;
0d62e5e8 2993
fa593d66
PA
2994 bp_explains_trap = 0;
2995 trace_event = 0;
c2d6af84 2996 in_step_range = 0;
183be222 2997 ourstatus->set_ignore ();
bd99dc85 2998
ef980d65 2999 bool was_any_resumed = any_resumed ();
f2faf941 3000
d7e15655 3001 if (step_over_bkpt == null_ptid)
d16f3f6c 3002 pid = wait_for_event (ptid, &w, options);
6bf5e0ba
PA
3003 else
3004 {
c058728c
SM
3005 threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
3006 target_pid_to_str (step_over_bkpt).c_str ());
d16f3f6c 3007 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
6bf5e0ba
PA
3008 }
3009
ef980d65 3010 if (pid == 0 || (pid == -1 && !was_any_resumed))
87ce2a04 3011 {
fa96cb38
PA
3012 gdb_assert (target_options & TARGET_WNOHANG);
3013
c058728c 3014 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");
fa96cb38 3015
183be222 3016 ourstatus->set_ignore ();
87ce2a04
DE
3017 return null_ptid;
3018 }
fa96cb38
PA
3019 else if (pid == -1)
3020 {
c058728c 3021 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");
bd99dc85 3022
183be222 3023 ourstatus->set_no_resumed ();
fa96cb38
PA
3024 return null_ptid;
3025 }
0d62e5e8 3026
0bfdf32f 3027 event_child = get_thread_lwp (current_thread);
0d62e5e8 3028
d16f3f6c 3029 /* wait_for_event only returns an exit status for the last
fa96cb38
PA
3030 child of a process. Report it. */
3031 if (WIFEXITED (w) || WIFSIGNALED (w))
da6d8c04 3032 {
fa96cb38 3033 if (WIFEXITED (w))
0d62e5e8 3034 {
e8a625d1
PA
3035 /* If we already have the exit recorded in waitstatus, use
3036 it. This will happen when we detect a zombie leader,
3037 when we had GDB_THREAD_OPTION_EXIT enabled for it. We
3038 want to report its exit as TARGET_WAITKIND_THREAD_EXITED,
3039 as the whole process hasn't exited yet. */
3040 const target_waitstatus &ws = event_child->waitstatus;
3041 if (ws.kind () != TARGET_WAITKIND_IGNORE)
3042 {
3043 gdb_assert (ws.kind () == TARGET_WAITKIND_EXITED
3044 || ws.kind () == TARGET_WAITKIND_THREAD_EXITED);
3045 *ourstatus = ws;
3046 }
3047 else
3048 ourstatus->set_exited (WEXITSTATUS (w));
bd99dc85 3049
c058728c
SM
3050 threads_debug_printf
3051 ("ret = %s, exited with retcode %d",
3052 target_pid_to_str (ptid_of (current_thread)).c_str (),
3053 WEXITSTATUS (w));
fa96cb38
PA
3054 }
3055 else
3056 {
183be222 3057 ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));
5b1c542e 3058
c058728c
SM
3059 threads_debug_printf
3060 ("ret = %s, terminated with signal %d",
3061 target_pid_to_str (ptid_of (current_thread)).c_str (),
3062 WTERMSIG (w));
0d62e5e8 3063 }
fa96cb38 3064
48989498 3065 return filter_exit_event (event_child, ourstatus);
da6d8c04
DJ
3066 }
3067
2d97cd35
AT
3068 /* If step-over executes a breakpoint instruction, in the case of a
3069 hardware single step it means a gdb/gdbserver breakpoint had been
3070 planted on top of a permanent breakpoint, in the case of a software
3071 single step it may just mean that gdbserver hit the reinsert breakpoint.
e7ad2f14 3072 The PC has been adjusted by save_stop_reason to point at
2d97cd35
AT
3073 the breakpoint address.
3074 So in the case of the hardware single step advance the PC manually
3075 past the breakpoint and in the case of software single step advance only
3b9a79ef 3076 if it's not the single_step_breakpoint we are hitting.
2d97cd35
AT
3077 This avoids that a program would keep trapping a permanent breakpoint
3078 forever. */
d7e15655 3079 if (step_over_bkpt != null_ptid
2d97cd35
AT
3080 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3081 && (event_child->stepping
3b9a79ef 3082 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
8090aef2 3083 {
dd373349
AT
3084 int increment_pc = 0;
3085 int breakpoint_kind = 0;
3086 CORE_ADDR stop_pc = event_child->stop_pc;
3087
d16f3f6c
TBA
3088 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3089 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
8090aef2 3090
c058728c
SM
3091 threads_debug_printf
3092 ("step-over for %s executed software breakpoint",
3093 target_pid_to_str (ptid_of (current_thread)).c_str ());
8090aef2
PA
3094
3095 if (increment_pc != 0)
3096 {
3097 struct regcache *regcache
3098 = get_thread_regcache (current_thread, 1);
3099
3100 event_child->stop_pc += increment_pc;
bf9ae9d8 3101 low_set_pc (regcache, event_child->stop_pc);
8090aef2 3102
d7146cda 3103 if (!low_breakpoint_at (event_child->stop_pc))
15c66dd6 3104 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
8090aef2
PA
3105 }
3106 }
3107
6bf5e0ba
PA
3108 /* If this event was not handled before, and is not a SIGTRAP, we
3109 report it. SIGILL and SIGSEGV are also treated as traps in case
3110 a breakpoint is inserted at the current PC. If this target does
3111 not support internal breakpoints at all, we also report the
3112 SIGTRAP without further processing; it's of no concern to us. */
3113 maybe_internal_trap
bf9ae9d8 3114 = (low_supports_breakpoints ()
6bf5e0ba
PA
3115 && (WSTOPSIG (w) == SIGTRAP
3116 || ((WSTOPSIG (w) == SIGILL
3117 || WSTOPSIG (w) == SIGSEGV)
d7146cda 3118 && low_breakpoint_at (event_child->stop_pc))));
6bf5e0ba
PA
3119
3120 if (maybe_internal_trap)
3121 {
3122 /* Handle anything that requires bookkeeping before deciding to
3123 report the event or continue waiting. */
3124
3125 /* First check if we can explain the SIGTRAP with an internal
3126 breakpoint, or if we should possibly report the event to GDB.
3127 Do this before anything that may remove or insert a
3128 breakpoint. */
3129 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3130
3131 /* We have a SIGTRAP, possibly a step-over dance has just
3132 finished. If so, tweak the state machine accordingly,
3b9a79ef
YQ
3133 reinsert breakpoints and delete any single-step
3134 breakpoints. */
6bf5e0ba
PA
3135 step_over_finished = finish_step_over (event_child);
3136
3137 /* Now invoke the callbacks of any internal breakpoints there. */
3138 check_breakpoints (event_child->stop_pc);
3139
219f2f23
PA
3140 /* Handle tracepoint data collecting. This may overflow the
3141 trace buffer, and cause a tracing stop, removing
3142 breakpoints. */
3143 trace_event = handle_tracepoints (event_child);
3144
6bf5e0ba 3145 if (bp_explains_trap)
c058728c 3146 threads_debug_printf ("Hit a gdbserver breakpoint.");
6bf5e0ba
PA
3147 }
3148 else
3149 {
3150 /* We have some other signal, possibly a step-over dance was in
3151 progress, and it should be cancelled too. */
3152 step_over_finished = finish_step_over (event_child);
fa593d66
PA
3153 }
3154
3155 /* We have all the data we need. Either report the event to GDB, or
3156 resume threads and keep waiting for more. */
3157
3158 /* If we're collecting a fast tracepoint, finish the collection and
3159 move out of the jump pad before delivering a signal. See
3160 linux_stabilize_threads. */
3161
3162 if (WIFSTOPPED (w)
3163 && WSTOPSIG (w) != SIGTRAP
3164 && supports_fast_tracepoints ()
58b4daa5 3165 && agent_loaded_p ())
fa593d66 3166 {
c058728c
SM
3167 threads_debug_printf ("Got signal %d for LWP %ld. Check if we need "
3168 "to defer or adjust it.",
3169 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
3170
3171 /* Allow debugging the jump pad itself. */
0bfdf32f 3172 if (current_thread->last_resume_kind != resume_step
fa593d66
PA
3173 && maybe_move_out_of_jump_pad (event_child, &w))
3174 {
3175 enqueue_one_deferred_signal (event_child, &w);
3176
c058728c
SM
3177 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3178 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66 3179
df95181f 3180 resume_one_lwp (event_child, 0, 0, NULL);
582511be
PA
3181
3182 return ignore_event (ourstatus);
fa593d66
PA
3183 }
3184 }
219f2f23 3185
229d26fc
SM
3186 if (event_child->collecting_fast_tracepoint
3187 != fast_tpoint_collect_result::not_collecting)
fa593d66 3188 {
c058728c
SM
3189 threads_debug_printf
3190 ("LWP %ld was trying to move out of the jump pad (%d). "
3191 "Check if we're already there.",
3192 lwpid_of (current_thread),
3193 (int) event_child->collecting_fast_tracepoint);
fa593d66
PA
3194
3195 trace_event = 1;
3196
3197 event_child->collecting_fast_tracepoint
3198 = linux_fast_tracepoint_collecting (event_child, NULL);
3199
229d26fc
SM
3200 if (event_child->collecting_fast_tracepoint
3201 != fast_tpoint_collect_result::before_insn)
fa593d66
PA
3202 {
3203 /* No longer need this breakpoint. */
3204 if (event_child->exit_jump_pad_bkpt != NULL)
3205 {
c058728c
SM
3206 threads_debug_printf
3207 ("No longer need exit-jump-pad bkpt; removing it."
3208 "stopping all threads momentarily.");
fa593d66
PA
3209
3210 /* Other running threads could hit this breakpoint.
3211 We don't handle moribund locations like GDB does,
3212 instead we always pause all threads when removing
3213 breakpoints, so that any step-over or
3214 decr_pc_after_break adjustment is always taken
3215 care of while the breakpoint is still
3216 inserted. */
3217 stop_all_lwps (1, event_child);
fa593d66
PA
3218
3219 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3220 event_child->exit_jump_pad_bkpt = NULL;
3221
3222 unstop_all_lwps (1, event_child);
3223
3224 gdb_assert (event_child->suspended >= 0);
3225 }
3226 }
3227
229d26fc
SM
3228 if (event_child->collecting_fast_tracepoint
3229 == fast_tpoint_collect_result::not_collecting)
fa593d66 3230 {
c058728c
SM
3231 threads_debug_printf
3232 ("fast tracepoint finished collecting successfully.");
fa593d66
PA
3233
3234 /* We may have a deferred signal to report. */
3235 if (dequeue_one_deferred_signal (event_child, &w))
c058728c 3236 threads_debug_printf ("dequeued one signal.");
3c11dd79 3237 else
fa593d66 3238 {
c058728c 3239 threads_debug_printf ("no deferred signals.");
fa593d66
PA
3240
3241 if (stabilizing_threads)
3242 {
183be222 3243 ourstatus->set_stopped (GDB_SIGNAL_0);
87ce2a04 3244
c058728c
SM
3245 threads_debug_printf
3246 ("ret = %s, stopped while stabilizing threads",
3247 target_pid_to_str (ptid_of (current_thread)).c_str ());
87ce2a04 3248
0bfdf32f 3249 return ptid_of (current_thread);
fa593d66
PA
3250 }
3251 }
3252 }
6bf5e0ba
PA
3253 }
3254
e471f25b
PA
3255 /* Check whether GDB would be interested in this event. */
3256
82075af2
JS
3257 /* Check if GDB is interested in this syscall. */
3258 if (WIFSTOPPED (w)
3259 && WSTOPSIG (w) == SYSCALL_SIGTRAP
9eedd27d 3260 && !gdb_catch_this_syscall (event_child))
82075af2 3261 {
c058728c
SM
3262 threads_debug_printf ("Ignored syscall for LWP %ld.",
3263 lwpid_of (current_thread));
82075af2 3264
df95181f 3265 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
edeeb602 3266
82075af2
JS
3267 return ignore_event (ourstatus);
3268 }
3269
e471f25b
PA
3270 /* If GDB is not interested in this signal, don't stop other
3271 threads, and don't report it to GDB. Just resume the inferior
3272 right away. We do this for threading-related signals as well as
3273 any that GDB specifically requested we ignore. But never ignore
3274 SIGSTOP if we sent it ourselves, and do not ignore signals when
3275 stepping - they may require special handling to skip the signal
c9587f88
AT
3276 handler. Also never ignore signals that could be caused by a
3277 breakpoint. */
e471f25b 3278 if (WIFSTOPPED (w)
0bfdf32f 3279 && current_thread->last_resume_kind != resume_step
e471f25b 3280 && (
1a981360 3281#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
fe978cb0 3282 (current_process ()->priv->thread_db != NULL
e471f25b
PA
3283 && (WSTOPSIG (w) == __SIGRTMIN
3284 || WSTOPSIG (w) == __SIGRTMIN + 1))
3285 ||
3286#endif
c12a5089 3287 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
e471f25b 3288 && !(WSTOPSIG (w) == SIGSTOP
c9587f88
AT
3289 && current_thread->last_resume_kind == resume_stop)
3290 && !linux_wstatus_maybe_breakpoint (w))))
e471f25b
PA
3291 {
3292 siginfo_t info, *info_p;
3293
c058728c
SM
3294 threads_debug_printf ("Ignored signal %d for LWP %ld.",
3295 WSTOPSIG (w), lwpid_of (current_thread));
e471f25b 3296
0bfdf32f 3297 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 3298 (PTRACE_TYPE_ARG3) 0, &info) == 0)
e471f25b
PA
3299 info_p = &info;
3300 else
3301 info_p = NULL;
863d01bd
PA
3302
3303 if (step_over_finished)
3304 {
3305 /* We cancelled this thread's step-over above. We still
3306 need to unsuspend all other LWPs, and set them back
3307 running again while the signal handler runs. */
3308 unsuspend_all_lwps (event_child);
3309
3310 /* Enqueue the pending signal info so that proceed_all_lwps
3311 doesn't lose it. */
3312 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3313
3314 proceed_all_lwps ();
3315 }
3316 else
3317 {
df95181f
TBA
3318 resume_one_lwp (event_child, event_child->stepping,
3319 WSTOPSIG (w), info_p);
863d01bd 3320 }
edeeb602 3321
582511be 3322 return ignore_event (ourstatus);
e471f25b
PA
3323 }
3324
c2d6af84
PA
3325 /* Note that all addresses are always "out of the step range" when
3326 there's no range to begin with. */
3327 in_step_range = lwp_in_step_range (event_child);
3328
3329 /* If GDB wanted this thread to single step, and the thread is out
3330 of the step range, we always want to report the SIGTRAP, and let
3331 GDB handle it. Watchpoints should always be reported. So should
3332 signals we can't explain. A SIGTRAP we can't explain could be a
3333 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3334 do, we're be able to handle GDB breakpoints on top of internal
3335 breakpoints, by handling the internal breakpoint and still
3336 reporting the event to GDB. If we don't, we're out of luck, GDB
863d01bd
PA
3337 won't see the breakpoint hit. If we see a single-step event but
3338 the thread should be continuing, don't pass the trap to gdb.
3339 That indicates that we had previously finished a single-step but
3340 left the single-step pending -- see
3341 complete_ongoing_step_over. */
6bf5e0ba 3342 report_to_gdb = (!maybe_internal_trap
0bfdf32f 3343 || (current_thread->last_resume_kind == resume_step
c2d6af84 3344 && !in_step_range)
15c66dd6 3345 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
863d01bd
PA
3346 || (!in_step_range
3347 && !bp_explains_trap
3348 && !trace_event
3349 && !step_over_finished
3350 && !(current_thread->last_resume_kind == resume_continue
3351 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
9f3a5c85 3352 || (gdb_breakpoint_here (event_child->stop_pc)
d3ce09f5 3353 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
de0d863e 3354 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
183be222 3355 || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);
d3ce09f5
SS
3356
3357 run_breakpoint_commands (event_child->stop_pc);
6bf5e0ba
PA
3358
3359 /* We found no reason GDB would want us to stop. We either hit one
3360 of our own breakpoints, or finished an internal step GDB
3361 shouldn't know about. */
3362 if (!report_to_gdb)
3363 {
c058728c
SM
3364 if (bp_explains_trap)
3365 threads_debug_printf ("Hit a gdbserver breakpoint.");
3366
3367 if (step_over_finished)
3368 threads_debug_printf ("Step-over finished.");
3369
3370 if (trace_event)
3371 threads_debug_printf ("Tracepoint event.");
3372
3373 if (lwp_in_step_range (event_child))
3374 threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
3375 paddress (event_child->stop_pc),
3376 paddress (event_child->step_range_start),
3377 paddress (event_child->step_range_end));
6bf5e0ba
PA
3378
3379 /* We're not reporting this breakpoint to GDB, so apply the
3380 decr_pc_after_break adjustment to the inferior's regcache
3381 ourselves. */
3382
bf9ae9d8 3383 if (low_supports_breakpoints ())
6bf5e0ba
PA
3384 {
3385 struct regcache *regcache
0bfdf32f 3386 = get_thread_regcache (current_thread, 1);
bf9ae9d8 3387 low_set_pc (regcache, event_child->stop_pc);
6bf5e0ba
PA
3388 }
3389
7984d532 3390 if (step_over_finished)
e3652c84
YQ
3391 {
3392 /* If we have finished stepping over a breakpoint, we've
3393 stopped and suspended all LWPs momentarily except the
3394 stepping one. This is where we resume them all again.
3395 We're going to keep waiting, so use proceed, which
3396 handles stepping over the next breakpoint. */
3397 unsuspend_all_lwps (event_child);
3398 }
3399 else
3400 {
3401 /* Remove the single-step breakpoints if any. Note that
3402 there isn't single-step breakpoint if we finished stepping
3403 over. */
7582c77c 3404 if (supports_software_single_step ()
e3652c84
YQ
3405 && has_single_step_breakpoints (current_thread))
3406 {
3407 stop_all_lwps (0, event_child);
3408 delete_single_step_breakpoints (current_thread);
3409 unstop_all_lwps (0, event_child);
3410 }
3411 }
7984d532 3412
c058728c 3413 threads_debug_printf ("proceeding all threads.");
edeeb602 3414
c058728c 3415 proceed_all_lwps ();
edeeb602 3416
582511be 3417 return ignore_event (ourstatus);
6bf5e0ba
PA
3418 }
3419
c058728c
SM
3420 if (debug_threads)
3421 {
3422 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3423 threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
3424 lwpid_of (get_lwp_thread (event_child)),
3425 event_child->waitstatus.to_string ().c_str ());
3426
3427 if (current_thread->last_resume_kind == resume_step)
3428 {
3429 if (event_child->step_range_start == event_child->step_range_end)
3430 threads_debug_printf
3431 ("GDB wanted to single-step, reporting event.");
3432 else if (!lwp_in_step_range (event_child))
3433 threads_debug_printf ("Out of step range, reporting event.");
3434 }
3435
3436 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3437 threads_debug_printf ("Stopped by watchpoint.");
3438 else if (gdb_breakpoint_here (event_child->stop_pc))
3439 threads_debug_printf ("Stopped by GDB breakpoint.");
3440 }
3441
3442 threads_debug_printf ("Hit a non-gdbserver trap event.");
6bf5e0ba
PA
3443
3444 /* Alright, we're going to report a stop. */
3445
3b9a79ef 3446 /* Remove single-step breakpoints. */
7582c77c 3447 if (supports_software_single_step ())
8901d193 3448 {
3b9a79ef 3449 /* Remove single-step breakpoints or not. It it is true, stop all
8901d193
YQ
3450 lwps, so that other threads won't hit the breakpoint in the
3451 staled memory. */
3b9a79ef 3452 int remove_single_step_breakpoints_p = 0;
8901d193
YQ
3453
3454 if (non_stop)
3455 {
3b9a79ef
YQ
3456 remove_single_step_breakpoints_p
3457 = has_single_step_breakpoints (current_thread);
8901d193
YQ
3458 }
3459 else
3460 {
3461 /* In all-stop, a stop reply cancels all previous resume
3b9a79ef 3462 requests. Delete all single-step breakpoints. */
8901d193 3463
9c80ecd6
SM
3464 find_thread ([&] (thread_info *thread) {
3465 if (has_single_step_breakpoints (thread))
3466 {
3467 remove_single_step_breakpoints_p = 1;
3468 return true;
3469 }
8901d193 3470
9c80ecd6
SM
3471 return false;
3472 });
8901d193
YQ
3473 }
3474
3b9a79ef 3475 if (remove_single_step_breakpoints_p)
8901d193 3476 {
3b9a79ef 3477 /* If we remove single-step breakpoints from memory, stop all lwps,
8901d193
YQ
3478 so that other threads won't hit the breakpoint in the staled
3479 memory. */
3480 stop_all_lwps (0, event_child);
3481
3482 if (non_stop)
3483 {
3b9a79ef
YQ
3484 gdb_assert (has_single_step_breakpoints (current_thread));
3485 delete_single_step_breakpoints (current_thread);
8901d193
YQ
3486 }
3487 else
3488 {
9c80ecd6
SM
3489 for_each_thread ([] (thread_info *thread){
3490 if (has_single_step_breakpoints (thread))
3491 delete_single_step_breakpoints (thread);
3492 });
8901d193
YQ
3493 }
3494
3495 unstop_all_lwps (0, event_child);
3496 }
3497 }
3498
582511be 3499 if (!stabilizing_threads)
6bf5e0ba
PA
3500 {
3501 /* In all-stop, stop all threads. */
582511be
PA
3502 if (!non_stop)
3503 stop_all_lwps (0, NULL);
6bf5e0ba 3504
c03e6ccc 3505 if (step_over_finished)
582511be
PA
3506 {
3507 if (!non_stop)
3508 {
3509 /* If we were doing a step-over, all other threads but
3510 the stepping one had been paused in start_step_over,
3511 with their suspend counts incremented. We don't want
3512 to do a full unstop/unpause, because we're in
3513 all-stop mode (so we want threads stopped), but we
3514 still need to unsuspend the other threads, to
3515 decrement their `suspended' count back. */
3516 unsuspend_all_lwps (event_child);
3517 }
3518 else
3519 {
3520 /* If we just finished a step-over, then all threads had
3521 been momentarily paused. In all-stop, that's fine,
3522 we want threads stopped by now anyway. In non-stop,
3523 we need to re-resume threads that GDB wanted to be
3524 running. */
3525 unstop_all_lwps (1, event_child);
3526 }
3527 }
c03e6ccc 3528
3aa5cfa0
AT
3529 /* If we're not waiting for a specific LWP, choose an event LWP
3530 from among those that have had events. Giving equal priority
3531 to all LWPs that have had events helps prevent
3532 starvation. */
d7e15655 3533 if (ptid == minus_one_ptid)
3aa5cfa0
AT
3534 {
3535 event_child->status_pending_p = 1;
3536 event_child->status_pending = w;
3537
3538 select_event_lwp (&event_child);
3539
3540 /* current_thread and event_child must stay in sync. */
24583e45 3541 switch_to_thread (get_lwp_thread (event_child));
3aa5cfa0
AT
3542
3543 event_child->status_pending_p = 0;
3544 w = event_child->status_pending;
3545 }
3546
3547
fa593d66 3548 /* Stabilize threads (move out of jump pads). */
582511be 3549 if (!non_stop)
5c9eb2f2 3550 target_stabilize_threads ();
6bf5e0ba
PA
3551 }
3552 else
3553 {
3554 /* If we just finished a step-over, then all threads had been
3555 momentarily paused. In all-stop, that's fine, we want
3556 threads stopped by now anyway. In non-stop, we need to
3557 re-resume threads that GDB wanted to be running. */
3558 if (step_over_finished)
7984d532 3559 unstop_all_lwps (1, event_child);
6bf5e0ba
PA
3560 }
3561
e88cf517
SM
3562 /* At this point, we haven't set OURSTATUS. This is where we do it. */
3563 gdb_assert (ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3564
183be222 3565 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
de0d863e 3566 {
393a6b59
PA
3567 /* If the reported event is an exit, fork, vfork, clone or exec,
3568 let GDB know. */
5a04c4cf 3569
393a6b59
PA
3570 /* Break the unreported fork/vfork/clone relationship chain. */
3571 if (is_new_child_status (event_child->waitstatus.kind ()))
5a04c4cf 3572 {
393a6b59
PA
3573 event_child->relative->relative = NULL;
3574 event_child->relative = NULL;
5a04c4cf
PA
3575 }
3576
00db26fa 3577 *ourstatus = event_child->waitstatus;
de0d863e 3578 /* Clear the event lwp's waitstatus since we handled it already. */
183be222 3579 event_child->waitstatus.set_ignore ();
de0d863e
DB
3580 }
3581 else
183be222 3582 {
e88cf517 3583 /* The LWP stopped due to a plain signal or a syscall signal. Either way,
3bfdcabb 3584 event_child->waitstatus wasn't filled in with the details, so look at
e88cf517
SM
3585 the wait status W. */
3586 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3587 {
3588 int syscall_number;
3589
3590 get_syscall_trapinfo (event_child, &syscall_number);
3591 if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
3592 ourstatus->set_syscall_entry (syscall_number);
3593 else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
3594 ourstatus->set_syscall_return (syscall_number);
3595 else
3596 gdb_assert_not_reached ("unexpected syscall state");
3597 }
3598 else if (current_thread->last_resume_kind == resume_stop
3599 && WSTOPSIG (w) == SIGSTOP)
3600 {
3601 /* A thread that has been requested to stop by GDB with vCont;t,
3602 and it stopped cleanly, so report as SIG0. The use of
3603 SIGSTOP is an implementation detail. */
3604 ourstatus->set_stopped (GDB_SIGNAL_0);
3605 }
3606 else
3607 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
183be222 3608 }
5b1c542e 3609
582511be 3610 /* Now that we've selected our final event LWP, un-adjust its PC if
3e572f71
PA
3611 it was a software breakpoint, and the client doesn't know we can
3612 adjust the breakpoint ourselves. */
3613 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
c12a5089 3614 && !cs.swbreak_feature)
582511be 3615 {
d4807ea2 3616 int decr_pc = low_decr_pc_after_break ();
582511be
PA
3617
3618 if (decr_pc != 0)
3619 {
3620 struct regcache *regcache
3621 = get_thread_regcache (current_thread, 1);
bf9ae9d8 3622 low_set_pc (regcache, event_child->stop_pc + decr_pc);
582511be
PA
3623 }
3624 }
3625
d7e15655 3626 gdb_assert (step_over_bkpt == null_ptid);
d50171e4 3627
e48359ea 3628 threads_debug_printf ("ret = %s, %s",
c058728c 3629 target_pid_to_str (ptid_of (current_thread)).c_str (),
e48359ea 3630 ourstatus->to_string ().c_str ());
bd99dc85 3631
48989498 3632 return filter_exit_event (event_child, ourstatus);
bd99dc85
PA
3633}
3634
3635/* Get rid of any pending event in the pipe. */
3636static void
3637async_file_flush (void)
3638{
cdc8e9b2 3639 linux_event_pipe.flush ();
bd99dc85
PA
3640}
3641
3642/* Put something in the pipe, so the event loop wakes up. */
3643static void
3644async_file_mark (void)
3645{
cdc8e9b2 3646 linux_event_pipe.mark ();
bd99dc85
PA
3647}
3648
6532e7e3
TBA
3649ptid_t
3650linux_process_target::wait (ptid_t ptid,
3651 target_waitstatus *ourstatus,
b60cea74 3652 target_wait_flags target_options)
bd99dc85 3653{
95954743 3654 ptid_t event_ptid;
bd99dc85 3655
bd99dc85
PA
3656 /* Flush the async file first. */
3657 if (target_is_async_p ())
3658 async_file_flush ();
3659
582511be
PA
3660 do
3661 {
d16f3f6c 3662 event_ptid = wait_1 (ptid, ourstatus, target_options);
582511be
PA
3663 }
3664 while ((target_options & TARGET_WNOHANG) == 0
183be222 3665 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3666
3667 /* If at least one stop was reported, there may be more. A single
3668 SIGCHLD can signal more than one child stop. */
3669 if (target_is_async_p ()
3670 && (target_options & TARGET_WNOHANG) != 0
d7e15655 3671 && event_ptid != null_ptid)
bd99dc85
PA
3672 async_file_mark ();
3673
3674 return event_ptid;
da6d8c04
DJ
3675}
3676
c5f62d5f 3677/* Send a signal to an LWP. */
fd500816
DJ
3678
3679static int
a1928bad 3680kill_lwp (unsigned long lwpid, int signo)
fd500816 3681{
4a6ed09b 3682 int ret;
fd500816 3683
4a6ed09b
PA
3684 errno = 0;
3685 ret = syscall (__NR_tkill, lwpid, signo);
3686 if (errno == ENOSYS)
3687 {
3688 /* If tkill fails, then we are not using nptl threads, a
3689 configuration we no longer support. */
3690 perror_with_name (("tkill"));
3691 }
3692 return ret;
fd500816
DJ
3693}
3694
964e4306
PA
3695void
3696linux_stop_lwp (struct lwp_info *lwp)
3697{
3698 send_sigstop (lwp);
3699}
3700
0d62e5e8 3701static void
02fc4de7 3702send_sigstop (struct lwp_info *lwp)
0d62e5e8 3703{
bd99dc85 3704 int pid;
0d62e5e8 3705
d86d4aaf 3706 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3707
0d62e5e8
DJ
3708 /* If we already have a pending stop signal for this process, don't
3709 send another. */
54a0b537 3710 if (lwp->stop_expected)
0d62e5e8 3711 {
c058728c 3712 threads_debug_printf ("Have pending sigstop for lwp %d", pid);
ae13219e 3713
0d62e5e8
DJ
3714 return;
3715 }
3716
c058728c 3717 threads_debug_printf ("Sending sigstop to lwp %d", pid);
0d62e5e8 3718
d50171e4 3719 lwp->stop_expected = 1;
bd99dc85 3720 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3721}
3722
df3e4dbe
SM
3723static void
3724send_sigstop (thread_info *thread, lwp_info *except)
02fc4de7 3725{
d86d4aaf 3726 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3727
7984d532
PA
3728 /* Ignore EXCEPT. */
3729 if (lwp == except)
df3e4dbe 3730 return;
7984d532 3731
02fc4de7 3732 if (lwp->stopped)
df3e4dbe 3733 return;
02fc4de7
PA
3734
3735 send_sigstop (lwp);
7984d532
PA
3736}
3737
3738/* Increment the suspend count of an LWP, and stop it, if not stopped
3739 yet. */
df3e4dbe
SM
3740static void
3741suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
7984d532 3742{
d86d4aaf 3743 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3744
3745 /* Ignore EXCEPT. */
3746 if (lwp == except)
df3e4dbe 3747 return;
7984d532 3748
863d01bd 3749 lwp_suspended_inc (lwp);
7984d532 3750
df3e4dbe 3751 send_sigstop (thread, except);
02fc4de7
PA
3752}
3753
e8a625d1
PA
3754/* Mark LWP dead, with WSTAT as exit status pending to report later.
3755 If THREAD_EVENT is true, interpret WSTAT as a thread exit event
3756 instead of a process exit event. This is meaningful for the leader
3757 thread, as we normally report a process-wide exit event when we see
3758 the leader exit, and a thread exit event when we see any other
3759 thread exit. */
3760
95954743 3761static void
e8a625d1 3762mark_lwp_dead (struct lwp_info *lwp, int wstat, bool thread_event)
95954743 3763{
95954743
PA
3764 /* Store the exit status for later. */
3765 lwp->status_pending_p = 1;
3766 lwp->status_pending = wstat;
3767
00db26fa
PA
3768 /* Store in waitstatus as well, as there's nothing else to process
3769 for this event. */
3770 if (WIFEXITED (wstat))
e8a625d1
PA
3771 {
3772 if (thread_event)
3773 lwp->waitstatus.set_thread_exited (WEXITSTATUS (wstat));
3774 else
3775 lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
3776 }
00db26fa 3777 else if (WIFSIGNALED (wstat))
e8a625d1
PA
3778 {
3779 gdb_assert (!thread_event);
3780 lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
3781 }
3782 else
3783 gdb_assert_not_reached ("unknown status kind");
00db26fa 3784
95954743
PA
3785 /* Prevent trying to stop it. */
3786 lwp->stopped = 1;
3787
3788 /* No further stops are expected from a dead lwp. */
3789 lwp->stop_expected = 0;
3790}
3791
00db26fa
PA
3792/* Return true if LWP has exited already, and has a pending exit event
3793 to report to GDB. */
3794
3795static int
3796lwp_is_marked_dead (struct lwp_info *lwp)
3797{
3798 return (lwp->status_pending_p
3799 && (WIFEXITED (lwp->status_pending)
3800 || WIFSIGNALED (lwp->status_pending)));
3801}
3802
d16f3f6c
TBA
3803void
3804linux_process_target::wait_for_sigstop ()
0d62e5e8 3805{
0bfdf32f 3806 struct thread_info *saved_thread;
95954743 3807 ptid_t saved_tid;
fa96cb38
PA
3808 int wstat;
3809 int ret;
0d62e5e8 3810
0bfdf32f
GB
3811 saved_thread = current_thread;
3812 if (saved_thread != NULL)
9c80ecd6 3813 saved_tid = saved_thread->id;
bd99dc85 3814 else
95954743 3815 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 3816
20ac1cdb
TBA
3817 scoped_restore_current_thread restore_thread;
3818
c058728c 3819 threads_debug_printf ("pulling events");
d50171e4 3820
fa96cb38
PA
3821 /* Passing NULL_PTID as filter indicates we want all events to be
3822 left pending. Eventually this returns when there are no
3823 unwaited-for children left. */
d16f3f6c 3824 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
fa96cb38 3825 gdb_assert (ret == -1);
0d62e5e8 3826
13d3d99b 3827 if (saved_thread == NULL || mythread_alive (saved_tid))
20ac1cdb 3828 return;
0d62e5e8
DJ
3829 else
3830 {
c058728c 3831 threads_debug_printf ("Previously current thread died.");
0d62e5e8 3832
f0db101d
PA
3833 /* We can't change the current inferior behind GDB's back,
3834 otherwise, a subsequent command may apply to the wrong
3835 process. */
20ac1cdb
TBA
3836 restore_thread.dont_restore ();
3837 switch_to_thread (nullptr);
0d62e5e8
DJ
3838 }
3839}
3840
13e567af
TBA
3841bool
3842linux_process_target::stuck_in_jump_pad (thread_info *thread)
fa593d66 3843{
d86d4aaf 3844 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3845
863d01bd
PA
3846 if (lwp->suspended != 0)
3847 {
f34652de 3848 internal_error ("LWP %ld is suspended, suspended=%d\n",
863d01bd
PA
3849 lwpid_of (thread), lwp->suspended);
3850 }
fa593d66
PA
3851 gdb_assert (lwp->stopped);
3852
3853 /* Allow debugging the jump pad, gdb_collect, etc.. */
3854 return (supports_fast_tracepoints ()
58b4daa5 3855 && agent_loaded_p ()
fa593d66 3856 && (gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3857 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
fa593d66 3858 || thread->last_resume_kind == resume_step)
229d26fc
SM
3859 && (linux_fast_tracepoint_collecting (lwp, NULL)
3860 != fast_tpoint_collect_result::not_collecting));
fa593d66
PA
3861}
3862
d16f3f6c
TBA
3863void
3864linux_process_target::move_out_of_jump_pad (thread_info *thread)
fa593d66 3865{
d86d4aaf 3866 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3867 int *wstat;
3868
863d01bd
PA
3869 if (lwp->suspended != 0)
3870 {
f34652de 3871 internal_error ("LWP %ld is suspended, suspended=%d\n",
863d01bd
PA
3872 lwpid_of (thread), lwp->suspended);
3873 }
fa593d66
PA
3874 gdb_assert (lwp->stopped);
3875
f0ce0d3a 3876 /* For gdb_breakpoint_here. */
24583e45
TBA
3877 scoped_restore_current_thread restore_thread;
3878 switch_to_thread (thread);
f0ce0d3a 3879
fa593d66
PA
3880 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3881
3882 /* Allow debugging the jump pad, gdb_collect, etc. */
3883 if (!gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3884 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
3885 && thread->last_resume_kind != resume_step
3886 && maybe_move_out_of_jump_pad (lwp, wstat))
3887 {
c058728c
SM
3888 threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
3889 lwpid_of (thread));
fa593d66
PA
3890
3891 if (wstat)
3892 {
3893 lwp->status_pending_p = 0;
3894 enqueue_one_deferred_signal (lwp, wstat);
3895
c058728c
SM
3896 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad",
3897 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
3898 }
3899
df95181f 3900 resume_one_lwp (lwp, 0, 0, NULL);
fa593d66
PA
3901 }
3902 else
863d01bd 3903 lwp_suspended_inc (lwp);
fa593d66
PA
3904}
3905
5a6b0a41
SM
3906static bool
3907lwp_running (thread_info *thread)
fa593d66 3908{
d86d4aaf 3909 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3910
00db26fa 3911 if (lwp_is_marked_dead (lwp))
5a6b0a41
SM
3912 return false;
3913
3914 return !lwp->stopped;
fa593d66
PA
3915}
3916
d16f3f6c
TBA
3917void
3918linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
0d62e5e8 3919{
bde24c0a
PA
3920 /* Should not be called recursively. */
3921 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3922
c058728c
SM
3923 THREADS_SCOPED_DEBUG_ENTER_EXIT;
3924
3925 threads_debug_printf
3926 ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
3927 (except != NULL
3928 ? target_pid_to_str (ptid_of (get_lwp_thread (except))).c_str ()
3929 : "none"));
87ce2a04 3930
bde24c0a
PA
3931 stopping_threads = (suspend
3932 ? STOPPING_AND_SUSPENDING_THREADS
3933 : STOPPING_THREADS);
7984d532
PA
3934
3935 if (suspend)
df3e4dbe
SM
3936 for_each_thread ([&] (thread_info *thread)
3937 {
3938 suspend_and_send_sigstop (thread, except);
3939 });
7984d532 3940 else
df3e4dbe
SM
3941 for_each_thread ([&] (thread_info *thread)
3942 {
3943 send_sigstop (thread, except);
3944 });
3945
fa96cb38 3946 wait_for_sigstop ();
bde24c0a 3947 stopping_threads = NOT_STOPPING_THREADS;
87ce2a04 3948
c058728c 3949 threads_debug_printf ("setting stopping_threads back to !stopping");
0d62e5e8
DJ
3950}
3951
863d01bd
PA
3952/* Enqueue one signal in the chain of signals which need to be
3953 delivered to this process on next resume. */
3954
3955static void
3956enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3957{
013e3554
TBA
3958 lwp->pending_signals.emplace_back (signal);
3959 if (info == nullptr)
3960 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
863d01bd 3961 else
013e3554 3962 lwp->pending_signals.back ().info = *info;
863d01bd
PA
3963}
3964
df95181f
TBA
3965void
3966linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
fa5308bd 3967{
984a2c04
YQ
3968 struct thread_info *thread = get_lwp_thread (lwp);
3969 struct regcache *regcache = get_thread_regcache (thread, 1);
8ce47547 3970
24583e45 3971 scoped_restore_current_thread restore_thread;
984a2c04 3972
24583e45 3973 switch_to_thread (thread);
7582c77c 3974 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
fa5308bd 3975
a0ff9e1a 3976 for (CORE_ADDR pc : next_pcs)
3b9a79ef 3977 set_single_step_breakpoint (pc, current_ptid);
fa5308bd
AT
3978}
3979
df95181f
TBA
3980int
3981linux_process_target::single_step (lwp_info* lwp)
7fe5e27e
AT
3982{
3983 int step = 0;
3984
b31cdfa6 3985 if (supports_hardware_single_step ())
7fe5e27e
AT
3986 {
3987 step = 1;
3988 }
7582c77c 3989 else if (supports_software_single_step ())
7fe5e27e
AT
3990 {
3991 install_software_single_step_breakpoints (lwp);
3992 step = 0;
3993 }
3994 else
c058728c 3995 threads_debug_printf ("stepping is not implemented on this target");
7fe5e27e
AT
3996
3997 return step;
3998}
3999
35ac8b3e 4000/* The signal can be delivered to the inferior if we are not trying to
5b061e98
YQ
4001 finish a fast tracepoint collect. Since signal can be delivered in
4002 the step-over, the program may go to signal handler and trap again
4003 after return from the signal handler. We can live with the spurious
4004 double traps. */
35ac8b3e
YQ
4005
4006static int
4007lwp_signal_can_be_delivered (struct lwp_info *lwp)
4008{
229d26fc
SM
4009 return (lwp->collecting_fast_tracepoint
4010 == fast_tpoint_collect_result::not_collecting);
35ac8b3e
YQ
4011}
4012
df95181f
TBA
4013void
4014linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
4015 int signal, siginfo_t *info)
da6d8c04 4016{
d86d4aaf 4017 struct thread_info *thread = get_lwp_thread (lwp);
82075af2 4018 int ptrace_request;
c06cbd92
YQ
4019 struct process_info *proc = get_thread_process (thread);
4020
4021 /* Note that target description may not be initialised
4022 (proc->tdesc == NULL) at this point because the program hasn't
4023 stopped at the first instruction yet. It means GDBserver skips
4024 the extra traps from the wrapper program (see option --wrapper).
4025 Code in this function that requires register access should be
4026 guarded by proc->tdesc == NULL or something else. */
0d62e5e8 4027
54a0b537 4028 if (lwp->stopped == 0)
0d62e5e8
DJ
4029 return;
4030
183be222 4031 gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
65706a29 4032
229d26fc
SM
4033 fast_tpoint_collect_result fast_tp_collecting
4034 = lwp->collecting_fast_tracepoint;
fa593d66 4035
229d26fc
SM
4036 gdb_assert (!stabilizing_threads
4037 || (fast_tp_collecting
4038 != fast_tpoint_collect_result::not_collecting));
fa593d66 4039
219f2f23
PA
4040 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4041 user used the "jump" command, or "set $pc = foo"). */
c06cbd92 4042 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
219f2f23
PA
4043 {
4044 /* Collecting 'while-stepping' actions doesn't make sense
4045 anymore. */
d86d4aaf 4046 release_while_stepping_state_list (thread);
219f2f23
PA
4047 }
4048
0d62e5e8 4049 /* If we have pending signals or status, and a new signal, enqueue the
35ac8b3e
YQ
4050 signal. Also enqueue the signal if it can't be delivered to the
4051 inferior right now. */
0d62e5e8 4052 if (signal != 0
fa593d66 4053 && (lwp->status_pending_p
013e3554 4054 || !lwp->pending_signals.empty ()
35ac8b3e 4055 || !lwp_signal_can_be_delivered (lwp)))
94610ec4
YQ
4056 {
4057 enqueue_pending_signal (lwp, signal, info);
4058
4059 /* Postpone any pending signal. It was enqueued above. */
4060 signal = 0;
4061 }
0d62e5e8 4062
d50171e4
PA
4063 if (lwp->status_pending_p)
4064 {
c058728c
SM
4065 threads_debug_printf
4066 ("Not resuming lwp %ld (%s, stop %s); has pending status",
4067 lwpid_of (thread), step ? "step" : "continue",
4068 lwp->stop_expected ? "expected" : "not expected");
d50171e4
PA
4069 return;
4070 }
0d62e5e8 4071
24583e45
TBA
4072 scoped_restore_current_thread restore_thread;
4073 switch_to_thread (thread);
0d62e5e8 4074
0d62e5e8
DJ
4075 /* This bit needs some thinking about. If we get a signal that
4076 we must report while a single-step reinsert is still pending,
4077 we often end up resuming the thread. It might be better to
4078 (ew) allow a stack of pending events; then we could be sure that
4079 the reinsert happened right away and not lose any signals.
4080
4081 Making this stack would also shrink the window in which breakpoints are
54a0b537 4082 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
4083 complete correctness, so it won't solve that problem. It may be
4084 worthwhile just to solve this one, however. */
54a0b537 4085 if (lwp->bp_reinsert != 0)
0d62e5e8 4086 {
c058728c
SM
4087 threads_debug_printf (" pending reinsert at 0x%s",
4088 paddress (lwp->bp_reinsert));
d50171e4 4089
b31cdfa6 4090 if (supports_hardware_single_step ())
d50171e4 4091 {
229d26fc 4092 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
fa593d66
PA
4093 {
4094 if (step == 0)
9986ba08 4095 warning ("BAD - reinserting but not stepping.");
fa593d66 4096 if (lwp->suspended)
9986ba08
PA
4097 warning ("BAD - reinserting and suspended(%d).",
4098 lwp->suspended);
fa593d66 4099 }
d50171e4 4100 }
f79b145d
YQ
4101
4102 step = maybe_hw_step (thread);
0d62e5e8
DJ
4103 }
4104
229d26fc 4105 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
c058728c
SM
4106 threads_debug_printf
4107 ("lwp %ld wants to get out of fast tracepoint jump pad "
4108 "(exit-jump-pad-bkpt)", lwpid_of (thread));
4109
229d26fc 4110 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
fa593d66 4111 {
c058728c
SM
4112 threads_debug_printf
4113 ("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
4114 lwpid_of (thread));
fa593d66 4115
b31cdfa6 4116 if (supports_hardware_single_step ())
fa593d66
PA
4117 step = 1;
4118 else
38e08fca 4119 {
f34652de 4120 internal_error ("moving out of jump pad single-stepping"
38e08fca
GB
4121 " not implemented on this target");
4122 }
fa593d66
PA
4123 }
4124
219f2f23
PA
4125 /* If we have while-stepping actions in this thread set it stepping.
4126 If we have a signal to deliver, it may or may not be set to
4127 SIG_IGN, we don't know. Assume so, and allow collecting
4128 while-stepping into a signal handler. A possible smart thing to
4129 do would be to set an internal breakpoint at the signal return
4130 address, continue, and carry on catching this while-stepping
4131 action only when that breakpoint is hit. A future
4132 enhancement. */
7fe5e27e 4133 if (thread->while_stepping != NULL)
219f2f23 4134 {
c058728c
SM
4135 threads_debug_printf
4136 ("lwp %ld has a while-stepping action -> forcing step.",
4137 lwpid_of (thread));
7fe5e27e
AT
4138
4139 step = single_step (lwp);
219f2f23
PA
4140 }
4141
bf9ae9d8 4142 if (proc->tdesc != NULL && low_supports_breakpoints ())
0d62e5e8 4143 {
0bfdf32f 4144 struct regcache *regcache = get_thread_regcache (current_thread, 1);
582511be 4145
bf9ae9d8 4146 lwp->stop_pc = low_get_pc (regcache);
582511be 4147
c058728c
SM
4148 threads_debug_printf (" %s from pc 0x%lx", step ? "step" : "continue",
4149 (long) lwp->stop_pc);
0d62e5e8
DJ
4150 }
4151
35ac8b3e
YQ
4152 /* If we have pending signals, consume one if it can be delivered to
4153 the inferior. */
013e3554 4154 if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
0d62e5e8 4155 {
013e3554 4156 const pending_signal &p_sig = lwp->pending_signals.front ();
0d62e5e8 4157
013e3554
TBA
4158 signal = p_sig.signal;
4159 if (p_sig.info.si_signo != 0)
d86d4aaf 4160 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
013e3554 4161 &p_sig.info);
32ca6d61 4162
013e3554 4163 lwp->pending_signals.pop_front ();
0d62e5e8
DJ
4164 }
4165
c058728c
SM
4166 threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
4167 lwpid_of (thread), step ? "step" : "continue", signal,
4168 lwp->stop_expected ? "expected" : "not expected");
94610ec4 4169
d7599cc0 4170 low_prepare_to_resume (lwp);
aa5ca48f 4171
d86d4aaf 4172 regcache_invalidate_thread (thread);
da6d8c04 4173 errno = 0;
54a0b537 4174 lwp->stepping = step;
82075af2
JS
4175 if (step)
4176 ptrace_request = PTRACE_SINGLESTEP;
4177 else if (gdb_catching_syscalls_p (lwp))
4178 ptrace_request = PTRACE_SYSCALL;
4179 else
4180 ptrace_request = PTRACE_CONT;
4181 ptrace (ptrace_request,
4182 lwpid_of (thread),
b8e1b30e 4183 (PTRACE_TYPE_ARG3) 0,
14ce3065
DE
4184 /* Coerce to a uintptr_t first to avoid potential gcc warning
4185 of coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 4186 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
0d62e5e8 4187
da6d8c04 4188 if (errno)
20471e00
SM
4189 {
4190 int saved_errno = errno;
4191
4192 threads_debug_printf ("ptrace errno = %d (%s)",
4193 saved_errno, strerror (saved_errno));
4194
4195 errno = saved_errno;
4196 perror_with_name ("resuming thread");
4197 }
23f238d3
PA
4198
4199 /* Successfully resumed. Clear state that no longer makes sense,
4200 and mark the LWP as running. Must not do this before resuming
4201 otherwise if that fails other code will be confused. E.g., we'd
4202 later try to stop the LWP and hang forever waiting for a stop
4203 status. Note that we must not throw after this is cleared,
4204 otherwise handle_zombie_lwp_error would get confused. */
4205 lwp->stopped = 0;
4206 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4207}
4208
d7599cc0
TBA
4209void
4210linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4211{
4212 /* Nop. */
4213}
4214
23f238d3
PA
4215/* Called when we try to resume a stopped LWP and that errors out. If
4216 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4217 or about to become), discard the error, clear any pending status
4218 the LWP may have, and return true (we'll collect the exit status
4219 soon enough). Otherwise, return false. */
4220
4221static int
4222check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4223{
4224 struct thread_info *thread = get_lwp_thread (lp);
4225
4226 /* If we get an error after resuming the LWP successfully, we'd
4227 confuse !T state for the LWP being gone. */
4228 gdb_assert (lp->stopped);
4229
4230 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4231 because even if ptrace failed with ESRCH, the tracee may be "not
4232 yet fully dead", but already refusing ptrace requests. In that
4233 case the tracee has 'R (Running)' state for a little bit
4234 (observed in Linux 3.18). See also the note on ESRCH in the
4235 ptrace(2) man page. Instead, check whether the LWP has any state
4236 other than ptrace-stopped. */
4237
4238 /* Don't assume anything if /proc/PID/status can't be read. */
4239 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 4240 {
23f238d3
PA
4241 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4242 lp->status_pending_p = 0;
4243 return 1;
4244 }
4245 return 0;
4246}
4247
df95181f
TBA
4248void
4249linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4250 siginfo_t *info)
23f238d3 4251{
a70b8144 4252 try
23f238d3 4253 {
df95181f 4254 resume_one_lwp_throw (lwp, step, signal, info);
23f238d3 4255 }
230d2906 4256 catch (const gdb_exception_error &ex)
23f238d3 4257 {
20471e00
SM
4258 if (check_ptrace_stopped_lwp_gone (lwp))
4259 {
4260 /* This could because we tried to resume an LWP after its leader
4261 exited. Mark it as resumed, so we can collect an exit event
4262 from it. */
4263 lwp->stopped = 0;
4264 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4265 }
4266 else
eedc3f4f 4267 throw;
3221518c 4268 }
da6d8c04
DJ
4269}
4270
5fdda392
SM
4271/* This function is called once per thread via for_each_thread.
4272 We look up which resume request applies to THREAD and mark it with a
4273 pointer to the appropriate resume request.
5544ad89
DJ
4274
4275 This algorithm is O(threads * resume elements), but resume elements
4276 is small (and will remain small at least until GDB supports thread
4277 suspension). */
ebcf782c 4278
5fdda392
SM
4279static void
4280linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
0d62e5e8 4281{
d86d4aaf 4282 struct lwp_info *lwp = get_thread_lwp (thread);
64386c31 4283
5fdda392 4284 for (int ndx = 0; ndx < n; ndx++)
95954743 4285 {
5fdda392 4286 ptid_t ptid = resume[ndx].thread;
d7e15655 4287 if (ptid == minus_one_ptid
9c80ecd6 4288 || ptid == thread->id
0c9070b3
YQ
4289 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4290 of PID'. */
e99b03dc 4291 || (ptid.pid () == pid_of (thread)
0e998d96 4292 && (ptid.is_pid ()
e38504b3 4293 || ptid.lwp () == -1)))
95954743 4294 {
5fdda392 4295 if (resume[ndx].kind == resume_stop
8336d594 4296 && thread->last_resume_kind == resume_stop)
d50171e4 4297 {
c058728c
SM
4298 threads_debug_printf
4299 ("already %s LWP %ld at GDB's request",
4300 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
4301 ? "stopped" : "stopping"),
4302 lwpid_of (thread));
d50171e4
PA
4303
4304 continue;
4305 }
4306
5a04c4cf
PA
4307 /* Ignore (wildcard) resume requests for already-resumed
4308 threads. */
5fdda392 4309 if (resume[ndx].kind != resume_stop
5a04c4cf
PA
4310 && thread->last_resume_kind != resume_stop)
4311 {
c058728c
SM
4312 threads_debug_printf
4313 ("already %s LWP %ld at GDB's request",
4314 (thread->last_resume_kind == resume_step
4315 ? "stepping" : "continuing"),
4316 lwpid_of (thread));
5a04c4cf
PA
4317 continue;
4318 }
4319
393a6b59
PA
4320 /* Don't let wildcard resumes resume fork/vfork/clone
4321 children that GDB does not yet know are new children. */
4322 if (lwp->relative != NULL)
5a04c4cf 4323 {
393a6b59 4324 struct lwp_info *rel = lwp->relative;
5a04c4cf
PA
4325
4326 if (rel->status_pending_p
393a6b59 4327 && is_new_child_status (rel->waitstatus.kind ()))
5a04c4cf 4328 {
c058728c
SM
4329 threads_debug_printf
4330 ("not resuming LWP %ld: has queued stop reply",
4331 lwpid_of (thread));
5a04c4cf
PA
4332 continue;
4333 }
4334 }
4335
4336 /* If the thread has a pending event that has already been
4337 reported to GDBserver core, but GDB has not pulled the
4338 event out of the vStopped queue yet, likewise, ignore the
4339 (wildcard) resume request. */
9c80ecd6 4340 if (in_queued_stop_replies (thread->id))
5a04c4cf 4341 {
c058728c
SM
4342 threads_debug_printf
4343 ("not resuming LWP %ld: has queued stop reply",
4344 lwpid_of (thread));
5a04c4cf
PA
4345 continue;
4346 }
4347
5fdda392 4348 lwp->resume = &resume[ndx];
8336d594 4349 thread->last_resume_kind = lwp->resume->kind;
fa593d66 4350
c2d6af84
PA
4351 lwp->step_range_start = lwp->resume->step_range_start;
4352 lwp->step_range_end = lwp->resume->step_range_end;
4353
fa593d66
PA
4354 /* If we had a deferred signal to report, dequeue one now.
4355 This can happen if LWP gets more than one signal while
4356 trying to get out of a jump pad. */
4357 if (lwp->stopped
4358 && !lwp->status_pending_p
4359 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4360 {
4361 lwp->status_pending_p = 1;
4362
c058728c
SM
4363 threads_debug_printf
4364 ("Dequeueing deferred signal %d for LWP %ld, "
4365 "leaving status pending.",
4366 WSTOPSIG (lwp->status_pending),
4367 lwpid_of (thread));
fa593d66
PA
4368 }
4369
5fdda392 4370 return;
95954743
PA
4371 }
4372 }
2bd7c093
PA
4373
4374 /* No resume action for this thread. */
4375 lwp->resume = NULL;
5544ad89
DJ
4376}
4377
df95181f
TBA
4378bool
4379linux_process_target::resume_status_pending (thread_info *thread)
5544ad89 4380{
d86d4aaf 4381 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4382
bd99dc85
PA
4383 /* LWPs which will not be resumed are not interesting, because
4384 we might not wait for them next time through linux_wait. */
2bd7c093 4385 if (lwp->resume == NULL)
25c28b4d 4386 return false;
64386c31 4387
df95181f 4388 return thread_still_has_status_pending (thread);
d50171e4
PA
4389}
4390
df95181f
TBA
4391bool
4392linux_process_target::thread_needs_step_over (thread_info *thread)
d50171e4 4393{
d86d4aaf 4394 struct lwp_info *lwp = get_thread_lwp (thread);
d50171e4 4395 CORE_ADDR pc;
c06cbd92
YQ
4396 struct process_info *proc = get_thread_process (thread);
4397
4398 /* GDBserver is skipping the extra traps from the wrapper program,
4399 don't have to do step over. */
4400 if (proc->tdesc == NULL)
eca55aec 4401 return false;
d50171e4
PA
4402
4403 /* LWPs which will not be resumed are not interesting, because we
4404 might not wait for them next time through linux_wait. */
4405
4406 if (!lwp->stopped)
4407 {
c058728c
SM
4408 threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
4409 lwpid_of (thread));
eca55aec 4410 return false;
d50171e4
PA
4411 }
4412
8336d594 4413 if (thread->last_resume_kind == resume_stop)
d50171e4 4414 {
c058728c
SM
4415 threads_debug_printf
4416 ("Need step over [LWP %ld]? Ignoring, should remain stopped",
4417 lwpid_of (thread));
eca55aec 4418 return false;
d50171e4
PA
4419 }
4420
7984d532
PA
4421 gdb_assert (lwp->suspended >= 0);
4422
4423 if (lwp->suspended)
4424 {
c058728c
SM
4425 threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
4426 lwpid_of (thread));
eca55aec 4427 return false;
7984d532
PA
4428 }
4429
bd99dc85 4430 if (lwp->status_pending_p)
d50171e4 4431 {
c058728c
SM
4432 threads_debug_printf
4433 ("Need step over [LWP %ld]? Ignoring, has pending status.",
4434 lwpid_of (thread));
eca55aec 4435 return false;
d50171e4
PA
4436 }
4437
4438 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4439 or we have. */
4440 pc = get_pc (lwp);
4441
4442 /* If the PC has changed since we stopped, then don't do anything,
4443 and let the breakpoint/tracepoint be hit. This happens if, for
4444 instance, GDB handled the decr_pc_after_break subtraction itself,
4445 GDB is OOL stepping this thread, or the user has issued a "jump"
4446 command, or poked thread's registers herself. */
4447 if (pc != lwp->stop_pc)
4448 {
c058728c
SM
4449 threads_debug_printf
4450 ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4451 "Old stop_pc was 0x%s, PC is now 0x%s", lwpid_of (thread),
4452 paddress (lwp->stop_pc), paddress (pc));
eca55aec 4453 return false;
d50171e4
PA
4454 }
4455
484b3c32
YQ
4456 /* On software single step target, resume the inferior with signal
4457 rather than stepping over. */
7582c77c 4458 if (supports_software_single_step ()
013e3554 4459 && !lwp->pending_signals.empty ()
484b3c32
YQ
4460 && lwp_signal_can_be_delivered (lwp))
4461 {
c058728c
SM
4462 threads_debug_printf
4463 ("Need step over [LWP %ld]? Ignoring, has pending signals.",
4464 lwpid_of (thread));
484b3c32 4465
eca55aec 4466 return false;
484b3c32
YQ
4467 }
4468
24583e45
TBA
4469 scoped_restore_current_thread restore_thread;
4470 switch_to_thread (thread);
d50171e4 4471
8b07ae33 4472 /* We can only step over breakpoints we know about. */
fa593d66 4473 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
d50171e4 4474 {
8b07ae33 4475 /* Don't step over a breakpoint that GDB expects to hit
9f3a5c85
LM
4476 though. If the condition is being evaluated on the target's side
4477 and it evaluate to false, step over this breakpoint as well. */
4478 if (gdb_breakpoint_here (pc)
d3ce09f5
SS
4479 && gdb_condition_true_at_breakpoint (pc)
4480 && gdb_no_commands_at_breakpoint (pc))
8b07ae33 4481 {
c058728c
SM
4482 threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
4483 " GDB breakpoint at 0x%s; skipping step over",
4484 lwpid_of (thread), paddress (pc));
d50171e4 4485
eca55aec 4486 return false;
8b07ae33
PA
4487 }
4488 else
4489 {
c058728c
SM
4490 threads_debug_printf ("Need step over [LWP %ld]? yes, "
4491 "found breakpoint at 0x%s",
4492 lwpid_of (thread), paddress (pc));
d50171e4 4493
8b07ae33 4494 /* We've found an lwp that needs stepping over --- return 1 so
8f86d7aa 4495 that find_thread stops looking. */
eca55aec 4496 return true;
8b07ae33 4497 }
d50171e4
PA
4498 }
4499
c058728c
SM
4500 threads_debug_printf
4501 ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
4502 lwpid_of (thread), paddress (pc));
c6ecbae5 4503
eca55aec 4504 return false;
5544ad89
DJ
4505}
4506
d16f3f6c
TBA
4507void
4508linux_process_target::start_step_over (lwp_info *lwp)
d50171e4 4509{
d86d4aaf 4510 struct thread_info *thread = get_lwp_thread (lwp);
d50171e4 4511 CORE_ADDR pc;
d50171e4 4512
c058728c
SM
4513 threads_debug_printf ("Starting step-over on LWP %ld. Stopping all threads",
4514 lwpid_of (thread));
d50171e4 4515
7984d532 4516 stop_all_lwps (1, lwp);
863d01bd
PA
4517
4518 if (lwp->suspended != 0)
4519 {
f34652de 4520 internal_error ("LWP %ld suspended=%d\n", lwpid_of (thread),
863d01bd
PA
4521 lwp->suspended);
4522 }
d50171e4 4523
c058728c 4524 threads_debug_printf ("Done stopping all threads for step-over.");
d50171e4
PA
4525
4526 /* Note, we should always reach here with an already adjusted PC,
4527 either by GDB (if we're resuming due to GDB's request), or by our
4528 caller, if we just finished handling an internal breakpoint GDB
4529 shouldn't care about. */
4530 pc = get_pc (lwp);
4531
24583e45
TBA
4532 bool step = false;
4533 {
4534 scoped_restore_current_thread restore_thread;
4535 switch_to_thread (thread);
d50171e4 4536
24583e45
TBA
4537 lwp->bp_reinsert = pc;
4538 uninsert_breakpoints_at (pc);
4539 uninsert_fast_tracepoint_jumps_at (pc);
d50171e4 4540
24583e45
TBA
4541 step = single_step (lwp);
4542 }
d50171e4 4543
df95181f 4544 resume_one_lwp (lwp, step, 0, NULL);
d50171e4
PA
4545
4546 /* Require next event from this LWP. */
9c80ecd6 4547 step_over_bkpt = thread->id;
d50171e4
PA
4548}
4549
b31cdfa6
TBA
4550bool
4551linux_process_target::finish_step_over (lwp_info *lwp)
d50171e4
PA
4552{
4553 if (lwp->bp_reinsert != 0)
4554 {
24583e45 4555 scoped_restore_current_thread restore_thread;
f79b145d 4556
c058728c 4557 threads_debug_printf ("Finished step over.");
d50171e4 4558
24583e45 4559 switch_to_thread (get_lwp_thread (lwp));
f79b145d 4560
d50171e4
PA
4561 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4562 may be no breakpoint to reinsert there by now. */
4563 reinsert_breakpoints_at (lwp->bp_reinsert);
fa593d66 4564 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
d50171e4
PA
4565
4566 lwp->bp_reinsert = 0;
4567
3b9a79ef
YQ
4568 /* Delete any single-step breakpoints. No longer needed. We
4569 don't have to worry about other threads hitting this trap,
4570 and later not being able to explain it, because we were
4571 stepping over a breakpoint, and we hold all threads but
4572 LWP stopped while doing that. */
b31cdfa6 4573 if (!supports_hardware_single_step ())
f79b145d 4574 {
3b9a79ef
YQ
4575 gdb_assert (has_single_step_breakpoints (current_thread));
4576 delete_single_step_breakpoints (current_thread);
f79b145d 4577 }
d50171e4
PA
4578
4579 step_over_bkpt = null_ptid;
b31cdfa6 4580 return true;
d50171e4
PA
4581 }
4582 else
b31cdfa6 4583 return false;
d50171e4
PA
4584}
4585
d16f3f6c
TBA
4586void
4587linux_process_target::complete_ongoing_step_over ()
863d01bd 4588{
d7e15655 4589 if (step_over_bkpt != null_ptid)
863d01bd
PA
4590 {
4591 struct lwp_info *lwp;
4592 int wstat;
4593 int ret;
4594
c058728c 4595 threads_debug_printf ("detach: step over in progress, finish it first");
863d01bd
PA
4596
4597 /* Passing NULL_PTID as filter indicates we want all events to
4598 be left pending. Eventually this returns when there are no
4599 unwaited-for children left. */
d16f3f6c
TBA
4600 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4601 __WALL);
863d01bd
PA
4602 gdb_assert (ret == -1);
4603
4604 lwp = find_lwp_pid (step_over_bkpt);
4605 if (lwp != NULL)
7e9cf1fe
PA
4606 {
4607 finish_step_over (lwp);
4608
4609 /* If we got our step SIGTRAP, don't leave it pending,
4610 otherwise we would report it to GDB as a spurious
4611 SIGTRAP. */
4612 gdb_assert (lwp->status_pending_p);
4613 if (WIFSTOPPED (lwp->status_pending)
4614 && WSTOPSIG (lwp->status_pending) == SIGTRAP)
4615 {
4616 thread_info *thread = get_lwp_thread (lwp);
4617 if (thread->last_resume_kind != resume_step)
4618 {
c058728c 4619 threads_debug_printf ("detach: discard step-over SIGTRAP");
7e9cf1fe
PA
4620
4621 lwp->status_pending_p = 0;
4622 lwp->status_pending = 0;
4623 resume_one_lwp (lwp, lwp->stepping, 0, NULL);
4624 }
4625 else
c058728c
SM
4626 threads_debug_printf
4627 ("detach: resume_step, not discarding step-over SIGTRAP");
7e9cf1fe
PA
4628 }
4629 }
863d01bd
PA
4630 step_over_bkpt = null_ptid;
4631 unsuspend_all_lwps (lwp);
4632 }
4633}
4634
df95181f
TBA
4635void
4636linux_process_target::resume_one_thread (thread_info *thread,
4637 bool leave_all_stopped)
5544ad89 4638{
d86d4aaf 4639 struct lwp_info *lwp = get_thread_lwp (thread);
d50171e4 4640 int leave_pending;
5544ad89 4641
2bd7c093 4642 if (lwp->resume == NULL)
c80825ff 4643 return;
5544ad89 4644
bd99dc85 4645 if (lwp->resume->kind == resume_stop)
5544ad89 4646 {
c058728c
SM
4647 threads_debug_printf ("resume_stop request for LWP %ld",
4648 lwpid_of (thread));
bd99dc85
PA
4649
4650 if (!lwp->stopped)
4651 {
c058728c 4652 threads_debug_printf ("stopping LWP %ld", lwpid_of (thread));
bd99dc85 4653
d50171e4
PA
4654 /* Stop the thread, and wait for the event asynchronously,
4655 through the event loop. */
02fc4de7 4656 send_sigstop (lwp);
bd99dc85
PA
4657 }
4658 else
4659 {
c058728c 4660 threads_debug_printf ("already stopped LWP %ld", lwpid_of (thread));
d50171e4
PA
4661
4662 /* The LWP may have been stopped in an internal event that
4663 was not meant to be notified back to GDB (e.g., gdbserver
4664 breakpoint), so we should be reporting a stop event in
4665 this case too. */
4666
4667 /* If the thread already has a pending SIGSTOP, this is a
4668 no-op. Otherwise, something later will presumably resume
4669 the thread and this will cause it to cancel any pending
4670 operation, due to last_resume_kind == resume_stop. If
4671 the thread already has a pending status to report, we
4672 will still report it the next time we wait - see
4673 status_pending_p_callback. */
1a981360
PA
4674
4675 /* If we already have a pending signal to report, then
4676 there's no need to queue a SIGSTOP, as this means we're
4677 midway through moving the LWP out of the jumppad, and we
4678 will report the pending signal as soon as that is
4679 finished. */
013e3554 4680 if (lwp->pending_signals_to_report.empty ())
1a981360 4681 send_sigstop (lwp);
bd99dc85 4682 }
32ca6d61 4683
bd99dc85
PA
4684 /* For stop requests, we're done. */
4685 lwp->resume = NULL;
183be222 4686 thread->last_status.set_ignore ();
c80825ff 4687 return;
5544ad89
DJ
4688 }
4689
bd99dc85 4690 /* If this thread which is about to be resumed has a pending status,
863d01bd
PA
4691 then don't resume it - we can just report the pending status.
4692 Likewise if it is suspended, because e.g., another thread is
4693 stepping past a breakpoint. Make sure to queue any signals that
4694 would otherwise be sent. In all-stop mode, we do this decision
4695 based on if *any* thread has a pending status. If there's a
4696 thread that needs the step-over-breakpoint dance, then don't
4697 resume any other thread but that particular one. */
4698 leave_pending = (lwp->suspended
4699 || lwp->status_pending_p
4700 || leave_all_stopped);
5544ad89 4701
0e9a339e
YQ
4702 /* If we have a new signal, enqueue the signal. */
4703 if (lwp->resume->sig != 0)
4704 {
4705 siginfo_t info, *info_p;
4706
4707 /* If this is the same signal we were previously stopped by,
4708 make sure to queue its siginfo. */
4709 if (WIFSTOPPED (lwp->last_status)
4710 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4711 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4712 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4713 info_p = &info;
4714 else
4715 info_p = NULL;
4716
4717 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4718 }
4719
d50171e4 4720 if (!leave_pending)
bd99dc85 4721 {
c058728c 4722 threads_debug_printf ("resuming LWP %ld", lwpid_of (thread));
5544ad89 4723
9c80ecd6 4724 proceed_one_lwp (thread, NULL);
bd99dc85
PA
4725 }
4726 else
c058728c 4727 threads_debug_printf ("leaving LWP %ld stopped", lwpid_of (thread));
5544ad89 4728
183be222 4729 thread->last_status.set_ignore ();
bd99dc85 4730 lwp->resume = NULL;
0d62e5e8
DJ
4731}
4732
0e4d7e35
TBA
4733void
4734linux_process_target::resume (thread_resume *resume_info, size_t n)
0d62e5e8 4735{
d86d4aaf 4736 struct thread_info *need_step_over = NULL;
c6ecbae5 4737
c058728c 4738 THREADS_SCOPED_DEBUG_ENTER_EXIT;
87ce2a04 4739
5fdda392
SM
4740 for_each_thread ([&] (thread_info *thread)
4741 {
4742 linux_set_resume_request (thread, resume_info, n);
4743 });
5544ad89 4744
d50171e4
PA
4745 /* If there is a thread which would otherwise be resumed, which has
4746 a pending status, then don't resume any threads - we can just
4747 report the pending status. Make sure to queue any signals that
4748 would otherwise be sent. In non-stop mode, we'll apply this
4749 logic to each thread individually. We consume all pending events
4750 before considering to start a step-over (in all-stop). */
25c28b4d 4751 bool any_pending = false;
bd99dc85 4752 if (!non_stop)
df95181f
TBA
4753 any_pending = find_thread ([this] (thread_info *thread)
4754 {
4755 return resume_status_pending (thread);
4756 }) != nullptr;
d50171e4
PA
4757
4758 /* If there is a thread which would otherwise be resumed, which is
4759 stopped at a breakpoint that needs stepping over, then don't
4760 resume any threads - have it step over the breakpoint with all
4761 other threads stopped, then resume all threads again. Make sure
4762 to queue any signals that would otherwise be delivered or
4763 queued. */
bf9ae9d8 4764 if (!any_pending && low_supports_breakpoints ())
df95181f
TBA
4765 need_step_over = find_thread ([this] (thread_info *thread)
4766 {
4767 return thread_needs_step_over (thread);
4768 });
d50171e4 4769
c80825ff 4770 bool leave_all_stopped = (need_step_over != NULL || any_pending);
d50171e4 4771
c058728c
SM
4772 if (need_step_over != NULL)
4773 threads_debug_printf ("Not resuming all, need step over");
4774 else if (any_pending)
4775 threads_debug_printf ("Not resuming, all-stop and found "
4776 "an LWP with pending status");
4777 else
4778 threads_debug_printf ("Resuming, no pending status or step over needed");
d50171e4
PA
4779
4780 /* Even if we're leaving threads stopped, queue all signals we'd
4781 otherwise deliver. */
c80825ff
SM
4782 for_each_thread ([&] (thread_info *thread)
4783 {
df95181f 4784 resume_one_thread (thread, leave_all_stopped);
c80825ff 4785 });
d50171e4
PA
4786
4787 if (need_step_over)
d86d4aaf 4788 start_step_over (get_thread_lwp (need_step_over));
87ce2a04 4789
1bebeeca
PA
4790 /* We may have events that were pending that can/should be sent to
4791 the client now. Trigger a linux_wait call. */
4792 if (target_is_async_p ())
4793 async_file_mark ();
d50171e4
PA
4794}
4795
df95181f
TBA
4796void
4797linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
d50171e4 4798{
d86d4aaf 4799 struct lwp_info *lwp = get_thread_lwp (thread);
d50171e4
PA
4800 int step;
4801
7984d532 4802 if (lwp == except)
e2b44075 4803 return;
d50171e4 4804
c058728c 4805 threads_debug_printf ("lwp %ld", lwpid_of (thread));
d50171e4
PA
4806
4807 if (!lwp->stopped)
4808 {
c058728c 4809 threads_debug_printf (" LWP %ld already running", lwpid_of (thread));
e2b44075 4810 return;
d50171e4
PA
4811 }
4812
02fc4de7 4813 if (thread->last_resume_kind == resume_stop
183be222 4814 && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
d50171e4 4815 {
c058728c
SM
4816 threads_debug_printf (" client wants LWP to remain %ld stopped",
4817 lwpid_of (thread));
e2b44075 4818 return;
d50171e4
PA
4819 }
4820
4821 if (lwp->status_pending_p)
4822 {
c058728c
SM
4823 threads_debug_printf (" LWP %ld has pending status, leaving stopped",
4824 lwpid_of (thread));
e2b44075 4825 return;
d50171e4
PA
4826 }
4827
7984d532
PA
4828 gdb_assert (lwp->suspended >= 0);
4829
d50171e4
PA
4830 if (lwp->suspended)
4831 {
c058728c 4832 threads_debug_printf (" LWP %ld is suspended", lwpid_of (thread));
e2b44075 4833 return;
d50171e4
PA
4834 }
4835
1a981360 4836 if (thread->last_resume_kind == resume_stop
013e3554 4837 && lwp->pending_signals_to_report.empty ()
229d26fc
SM
4838 && (lwp->collecting_fast_tracepoint
4839 == fast_tpoint_collect_result::not_collecting))
02fc4de7
PA
4840 {
4841 /* We haven't reported this LWP as stopped yet (otherwise, the
4842 last_status.kind check above would catch it, and we wouldn't
4843 reach here. This LWP may have been momentarily paused by a
4844 stop_all_lwps call while handling for example, another LWP's
4845 step-over. In that case, the pending expected SIGSTOP signal
4846 that was queued at vCont;t handling time will have already
4847 been consumed by wait_for_sigstop, and so we need to requeue
4848 another one here. Note that if the LWP already has a SIGSTOP
4849 pending, this is a no-op. */
4850
c058728c
SM
4851 threads_debug_printf
4852 ("Client wants LWP %ld to stop. Making sure it has a SIGSTOP pending",
4853 lwpid_of (thread));
02fc4de7
PA
4854
4855 send_sigstop (lwp);
4856 }
4857
863d01bd
PA
4858 if (thread->last_resume_kind == resume_step)
4859 {
c058728c
SM
4860 threads_debug_printf (" stepping LWP %ld, client wants it stepping",
4861 lwpid_of (thread));
8901d193 4862
3b9a79ef 4863 /* If resume_step is requested by GDB, install single-step
8901d193 4864 breakpoints when the thread is about to be actually resumed if
3b9a79ef 4865 the single-step breakpoints weren't removed. */
7582c77c 4866 if (supports_software_single_step ()
3b9a79ef 4867 && !has_single_step_breakpoints (thread))
8901d193
YQ
4868 install_software_single_step_breakpoints (lwp);
4869
4870 step = maybe_hw_step (thread);
863d01bd
PA
4871 }
4872 else if (lwp->bp_reinsert != 0)
4873 {
c058728c
SM
4874 threads_debug_printf (" stepping LWP %ld, reinsert set",
4875 lwpid_of (thread));
f79b145d
YQ
4876
4877 step = maybe_hw_step (thread);
863d01bd
PA
4878 }
4879 else
4880 step = 0;
4881
df95181f 4882 resume_one_lwp (lwp, step, 0, NULL);
7984d532
PA
4883}
4884
df95181f
TBA
4885void
4886linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4887 lwp_info *except)
7984d532 4888{
d86d4aaf 4889 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
4890
4891 if (lwp == except)
e2b44075 4892 return;
7984d532 4893
863d01bd 4894 lwp_suspended_decr (lwp);
7984d532 4895
e2b44075 4896 proceed_one_lwp (thread, except);
d50171e4
PA
4897}
4898
d16f3f6c
TBA
4899void
4900linux_process_target::proceed_all_lwps ()
d50171e4 4901{
d86d4aaf 4902 struct thread_info *need_step_over;
d50171e4
PA
4903
4904 /* If there is a thread which would otherwise be resumed, which is
4905 stopped at a breakpoint that needs stepping over, then don't
4906 resume any threads - have it step over the breakpoint with all
4907 other threads stopped, then resume all threads again. */
4908
bf9ae9d8 4909 if (low_supports_breakpoints ())
d50171e4 4910 {
df95181f
TBA
4911 need_step_over = find_thread ([this] (thread_info *thread)
4912 {
4913 return thread_needs_step_over (thread);
4914 });
d50171e4
PA
4915
4916 if (need_step_over != NULL)
4917 {
c058728c
SM
4918 threads_debug_printf ("found thread %ld needing a step-over",
4919 lwpid_of (need_step_over));
d50171e4 4920
d86d4aaf 4921 start_step_over (get_thread_lwp (need_step_over));
d50171e4
PA
4922 return;
4923 }
4924 }
5544ad89 4925
c058728c 4926 threads_debug_printf ("Proceeding, no step-over needed");
d50171e4 4927
df95181f 4928 for_each_thread ([this] (thread_info *thread)
e2b44075
SM
4929 {
4930 proceed_one_lwp (thread, NULL);
4931 });
d50171e4
PA
4932}
4933
d16f3f6c
TBA
4934void
4935linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
d50171e4 4936{
c058728c
SM
4937 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4938
4939 if (except)
4940 threads_debug_printf ("except=(LWP %ld)",
4941 lwpid_of (get_lwp_thread (except)));
4942 else
4943 threads_debug_printf ("except=nullptr");
5544ad89 4944
7984d532 4945 if (unsuspend)
e2b44075
SM
4946 for_each_thread ([&] (thread_info *thread)
4947 {
4948 unsuspend_and_proceed_one_lwp (thread, except);
4949 });
7984d532 4950 else
e2b44075
SM
4951 for_each_thread ([&] (thread_info *thread)
4952 {
4953 proceed_one_lwp (thread, except);
4954 });
0d62e5e8
DJ
4955}
4956
58caa3dc
DJ
4957
4958#ifdef HAVE_LINUX_REGSETS
4959
1faeff08
MR
4960#define use_linux_regsets 1
4961
030031ee
PA
4962/* Returns true if REGSET has been disabled. */
4963
4964static int
4965regset_disabled (struct regsets_info *info, struct regset_info *regset)
4966{
4967 return (info->disabled_regsets != NULL
4968 && info->disabled_regsets[regset - info->regsets]);
4969}
4970
4971/* Disable REGSET. */
4972
4973static void
4974disable_regset (struct regsets_info *info, struct regset_info *regset)
4975{
4976 int dr_offset;
4977
4978 dr_offset = regset - info->regsets;
4979 if (info->disabled_regsets == NULL)
224c3ddb 4980 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
030031ee
PA
4981 info->disabled_regsets[dr_offset] = 1;
4982}
4983
58caa3dc 4984static int
3aee8918
PA
4985regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4986 struct regcache *regcache)
58caa3dc
DJ
4987{
4988 struct regset_info *regset;
e9d25b98 4989 int saw_general_regs = 0;
95954743 4990 int pid;
1570b33e 4991 struct iovec iov;
58caa3dc 4992
0bfdf32f 4993 pid = lwpid_of (current_thread);
28eef672 4994 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 4995 {
1570b33e
L
4996 void *buf, *data;
4997 int nt_type, res;
58caa3dc 4998
030031ee 4999 if (regset->size == 0 || regset_disabled (regsets_info, regset))
28eef672 5000 continue;
58caa3dc 5001
bca929d3 5002 buf = xmalloc (regset->size);
1570b33e
L
5003
5004 nt_type = regset->nt_type;
5005 if (nt_type)
5006 {
5007 iov.iov_base = buf;
5008 iov.iov_len = regset->size;
5009 data = (void *) &iov;
5010 }
5011 else
5012 data = buf;
5013
dfb64f85 5014#ifndef __sparc__
f15f9948 5015 res = ptrace (regset->get_request, pid,
b8e1b30e 5016 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5017#else
1570b33e 5018 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 5019#endif
58caa3dc
DJ
5020 if (res < 0)
5021 {
1ef53e6b
AH
5022 if (errno == EIO
5023 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
58caa3dc 5024 {
1ef53e6b
AH
5025 /* If we get EIO on a regset, or an EINVAL and the regset is
5026 optional, do not try it again for this process mode. */
030031ee 5027 disable_regset (regsets_info, regset);
58caa3dc 5028 }
e5a9158d
AA
5029 else if (errno == ENODATA)
5030 {
5031 /* ENODATA may be returned if the regset is currently
5032 not "active". This can happen in normal operation,
5033 so suppress the warning in this case. */
5034 }
fcd4a73d
YQ
5035 else if (errno == ESRCH)
5036 {
5037 /* At this point, ESRCH should mean the process is
5038 already gone, in which case we simply ignore attempts
5039 to read its registers. */
5040 }
58caa3dc
DJ
5041 else
5042 {
0d62e5e8 5043 char s[256];
95954743
PA
5044 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5045 pid);
0d62e5e8 5046 perror (s);
58caa3dc
DJ
5047 }
5048 }
098dbe61
AA
5049 else
5050 {
5051 if (regset->type == GENERAL_REGS)
5052 saw_general_regs = 1;
5053 regset->store_function (regcache, buf);
5054 }
fdeb2a12 5055 free (buf);
58caa3dc 5056 }
e9d25b98
DJ
5057 if (saw_general_regs)
5058 return 0;
5059 else
5060 return 1;
58caa3dc
DJ
5061}
5062
5063static int
3aee8918
PA
5064regsets_store_inferior_registers (struct regsets_info *regsets_info,
5065 struct regcache *regcache)
58caa3dc
DJ
5066{
5067 struct regset_info *regset;
e9d25b98 5068 int saw_general_regs = 0;
95954743 5069 int pid;
1570b33e 5070 struct iovec iov;
58caa3dc 5071
0bfdf32f 5072 pid = lwpid_of (current_thread);
28eef672 5073 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 5074 {
1570b33e
L
5075 void *buf, *data;
5076 int nt_type, res;
58caa3dc 5077
feea5f36
AA
5078 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5079 || regset->fill_function == NULL)
28eef672 5080 continue;
58caa3dc 5081
bca929d3 5082 buf = xmalloc (regset->size);
545587ee
DJ
5083
5084 /* First fill the buffer with the current register set contents,
5085 in case there are any items in the kernel's regset that are
5086 not in gdbserver's regcache. */
1570b33e
L
5087
5088 nt_type = regset->nt_type;
5089 if (nt_type)
5090 {
5091 iov.iov_base = buf;
5092 iov.iov_len = regset->size;
5093 data = (void *) &iov;
5094 }
5095 else
5096 data = buf;
5097
dfb64f85 5098#ifndef __sparc__
f15f9948 5099 res = ptrace (regset->get_request, pid,
b8e1b30e 5100 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5101#else
689cc2ae 5102 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 5103#endif
545587ee
DJ
5104
5105 if (res == 0)
5106 {
5107 /* Then overlay our cached registers on that. */
442ea881 5108 regset->fill_function (regcache, buf);
545587ee
DJ
5109
5110 /* Only now do we write the register set. */
dfb64f85 5111#ifndef __sparc__
f15f9948 5112 res = ptrace (regset->set_request, pid,
b8e1b30e 5113 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5114#else
1570b33e 5115 res = ptrace (regset->set_request, pid, data, nt_type);
dfb64f85 5116#endif
545587ee
DJ
5117 }
5118
58caa3dc
DJ
5119 if (res < 0)
5120 {
1ef53e6b
AH
5121 if (errno == EIO
5122 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
58caa3dc 5123 {
1ef53e6b
AH
5124 /* If we get EIO on a regset, or an EINVAL and the regset is
5125 optional, do not try it again for this process mode. */
030031ee 5126 disable_regset (regsets_info, regset);
58caa3dc 5127 }
3221518c
UW
5128 else if (errno == ESRCH)
5129 {
1b3f6016
PA
5130 /* At this point, ESRCH should mean the process is
5131 already gone, in which case we simply ignore attempts
5132 to change its registers. See also the related
df95181f 5133 comment in resume_one_lwp. */
fdeb2a12 5134 free (buf);
3221518c
UW
5135 return 0;
5136 }
58caa3dc
DJ
5137 else
5138 {
ce3a066d 5139 perror ("Warning: ptrace(regsets_store_inferior_registers)");
58caa3dc
DJ
5140 }
5141 }
e9d25b98
DJ
5142 else if (regset->type == GENERAL_REGS)
5143 saw_general_regs = 1;
09ec9b38 5144 free (buf);
58caa3dc 5145 }
e9d25b98
DJ
5146 if (saw_general_regs)
5147 return 0;
5148 else
5149 return 1;
58caa3dc
DJ
5150}
5151
1faeff08 5152#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5153
1faeff08 5154#define use_linux_regsets 0
3aee8918
PA
5155#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5156#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5157
58caa3dc 5158#endif
1faeff08
MR
5159
5160/* Return 1 if register REGNO is supported by one of the regset ptrace
5161 calls or 0 if it has to be transferred individually. */
5162
5163static int
3aee8918 5164linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5165{
5166 unsigned char mask = 1 << (regno % 8);
5167 size_t index = regno / 8;
5168
5169 return (use_linux_regsets
3aee8918
PA
5170 && (regs_info->regset_bitmap == NULL
5171 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5172}
5173
58caa3dc 5174#ifdef HAVE_LINUX_USRREGS
1faeff08 5175
5b3da067 5176static int
3aee8918 5177register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5178{
5179 int addr;
5180
3aee8918 5181 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5182 error ("Invalid register number %d.", regnum);
5183
3aee8918 5184 addr = usrregs->regmap[regnum];
1faeff08
MR
5185
5186 return addr;
5187}
5188
daca57a7
TBA
5189
5190void
5191linux_process_target::fetch_register (const usrregs_info *usrregs,
5192 regcache *regcache, int regno)
1faeff08
MR
5193{
5194 CORE_ADDR regaddr;
5195 int i, size;
5196 char *buf;
5197 int pid;
5198
3aee8918 5199 if (regno >= usrregs->num_regs)
1faeff08 5200 return;
daca57a7 5201 if (low_cannot_fetch_register (regno))
1faeff08
MR
5202 return;
5203
3aee8918 5204 regaddr = register_addr (usrregs, regno);
1faeff08
MR
5205 if (regaddr == -1)
5206 return;
5207
3aee8918
PA
5208 size = ((register_size (regcache->tdesc, regno)
5209 + sizeof (PTRACE_XFER_TYPE) - 1)
1faeff08 5210 & -sizeof (PTRACE_XFER_TYPE));
224c3ddb 5211 buf = (char *) alloca (size);
1faeff08 5212
0bfdf32f 5213 pid = lwpid_of (current_thread);
1faeff08
MR
5214 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5215 {
5216 errno = 0;
5217 *(PTRACE_XFER_TYPE *) (buf + i) =
5218 ptrace (PTRACE_PEEKUSER, pid,
5219 /* Coerce to a uintptr_t first to avoid potential gcc warning
5220 of coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 5221 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
1faeff08
MR
5222 regaddr += sizeof (PTRACE_XFER_TYPE);
5223 if (errno != 0)
9a70f35c
YQ
5224 {
5225 /* Mark register REGNO unavailable. */
5226 supply_register (regcache, regno, NULL);
5227 return;
5228 }
1faeff08
MR
5229 }
5230
b35db733 5231 low_supply_ptrace_register (regcache, regno, buf);
1faeff08
MR
5232}
5233
daca57a7
TBA
5234void
5235linux_process_target::store_register (const usrregs_info *usrregs,
5236 regcache *regcache, int regno)
1faeff08
MR
5237{
5238 CORE_ADDR regaddr;
5239 int i, size;
5240 char *buf;
5241 int pid;
5242
3aee8918 5243 if (regno >= usrregs->num_regs)
1faeff08 5244 return;
daca57a7 5245 if (low_cannot_store_register (regno))
1faeff08
MR
5246 return;
5247
3aee8918 5248 regaddr = register_addr (usrregs, regno);
1faeff08
MR
5249 if (regaddr == -1)
5250 return;
5251
3aee8918
PA
5252 size = ((register_size (regcache->tdesc, regno)
5253 + sizeof (PTRACE_XFER_TYPE) - 1)
1faeff08 5254 & -sizeof (PTRACE_XFER_TYPE));
224c3ddb 5255 buf = (char *) alloca (size);
1faeff08
MR
5256 memset (buf, 0, size);
5257
b35db733 5258 low_collect_ptrace_register (regcache, regno, buf);
1faeff08 5259
0bfdf32f 5260 pid = lwpid_of (current_thread);
1faeff08
MR
5261 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5262 {
5263 errno = 0;
5264 ptrace (PTRACE_POKEUSER, pid,
5265 /* Coerce to a uintptr_t first to avoid potential gcc warning
5266 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e
LM
5267 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5268 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
1faeff08
MR
5269 if (errno != 0)
5270 {
5271 /* At this point, ESRCH should mean the process is
5272 already gone, in which case we simply ignore attempts
5273 to change its registers. See also the related
df95181f 5274 comment in resume_one_lwp. */
1faeff08
MR
5275 if (errno == ESRCH)
5276 return;
5277
daca57a7
TBA
5278
5279 if (!low_cannot_store_register (regno))
6d91ce9a 5280 error ("writing register %d: %s", regno, safe_strerror (errno));
1faeff08
MR
5281 }
5282 regaddr += sizeof (PTRACE_XFER_TYPE);
5283 }
5284}
daca57a7 5285#endif /* HAVE_LINUX_USRREGS */
1faeff08 5286
b35db733
TBA
5287void
5288linux_process_target::low_collect_ptrace_register (regcache *regcache,
5289 int regno, char *buf)
5290{
5291 collect_register (regcache, regno, buf);
5292}
5293
5294void
5295linux_process_target::low_supply_ptrace_register (regcache *regcache,
5296 int regno, const char *buf)
5297{
5298 supply_register (regcache, regno, buf);
5299}
5300
daca57a7
TBA
5301void
5302linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5303 regcache *regcache,
5304 int regno, int all)
1faeff08 5305{
daca57a7 5306#ifdef HAVE_LINUX_USRREGS
3aee8918
PA
5307 struct usrregs_info *usr = regs_info->usrregs;
5308
1faeff08
MR
5309 if (regno == -1)
5310 {
3aee8918
PA
5311 for (regno = 0; regno < usr->num_regs; regno++)
5312 if (all || !linux_register_in_regsets (regs_info, regno))
5313 fetch_register (usr, regcache, regno);
1faeff08
MR
5314 }
5315 else
3aee8918 5316 fetch_register (usr, regcache, regno);
daca57a7 5317#endif
1faeff08
MR
5318}
5319
daca57a7
TBA
5320void
5321linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5322 regcache *regcache,
5323 int regno, int all)
1faeff08 5324{
daca57a7 5325#ifdef HAVE_LINUX_USRREGS
3aee8918
PA
5326 struct usrregs_info *usr = regs_info->usrregs;
5327
1faeff08
MR
5328 if (regno == -1)
5329 {
3aee8918
PA
5330 for (regno = 0; regno < usr->num_regs; regno++)
5331 if (all || !linux_register_in_regsets (regs_info, regno))
5332 store_register (usr, regcache, regno);
1faeff08
MR
5333 }
5334 else
3aee8918 5335 store_register (usr, regcache, regno);
58caa3dc 5336#endif
daca57a7 5337}
1faeff08 5338
a5a4d4cd
TBA
5339void
5340linux_process_target::fetch_registers (regcache *regcache, int regno)
1faeff08
MR
5341{
5342 int use_regsets;
5343 int all = 0;
aa8d21c9 5344 const regs_info *regs_info = get_regs_info ();
1faeff08
MR
5345
5346 if (regno == -1)
5347 {
bd70b1f2 5348 if (regs_info->usrregs != NULL)
3aee8918 5349 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
bd70b1f2 5350 low_fetch_register (regcache, regno);
c14dfd32 5351
3aee8918
PA
5352 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5353 if (regs_info->usrregs != NULL)
5354 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
1faeff08
MR
5355 }
5356 else
5357 {
bd70b1f2 5358 if (low_fetch_register (regcache, regno))
c14dfd32
PA
5359 return;
5360
3aee8918 5361 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5362 if (use_regsets)
3aee8918
PA
5363 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5364 regcache);
5365 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5366 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5367 }
58caa3dc
DJ
5368}
5369
a5a4d4cd
TBA
5370void
5371linux_process_target::store_registers (regcache *regcache, int regno)
58caa3dc 5372{
1faeff08
MR
5373 int use_regsets;
5374 int all = 0;
aa8d21c9 5375 const regs_info *regs_info = get_regs_info ();
1faeff08
MR
5376
5377 if (regno == -1)
5378 {
3aee8918
PA
5379 all = regsets_store_inferior_registers (regs_info->regsets_info,
5380 regcache);
5381 if (regs_info->usrregs != NULL)
5382 usr_store_inferior_registers (regs_info, regcache, regno, all);
1faeff08
MR
5383 }
5384 else
5385 {
3aee8918 5386 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5387 if (use_regsets)
3aee8918
PA
5388 all = regsets_store_inferior_registers (regs_info->regsets_info,
5389 regcache);
5390 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5391 usr_store_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5392 }
58caa3dc
DJ
5393}
5394
bd70b1f2
TBA
5395bool
5396linux_process_target::low_fetch_register (regcache *regcache, int regno)
5397{
5398 return false;
5399}
da6d8c04 5400
e2558df3 5401/* A wrapper for the read_memory target op. */
da6d8c04 5402
c3e735a6 5403static int
f450004a 5404linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
e2558df3 5405{
52405d85 5406 return the_target->read_memory (memaddr, myaddr, len);
e2558df3
TBA
5407}
5408
e2558df3 5409
421490af
PA
5410/* Helper for read_memory/write_memory using /proc/PID/mem. Because
5411 we can use a single read/write call, this can be much more
5412 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
5413 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running threads.
5414 One an only one of READBUF and WRITEBUF is non-null. If READBUF is
5415 not null, then we're reading, otherwise we're writing. */
5416
5417static int
5418proc_xfer_memory (CORE_ADDR memaddr, unsigned char *readbuf,
5419 const gdb_byte *writebuf, int len)
da6d8c04 5420{
421490af 5421 gdb_assert ((readbuf == nullptr) != (writebuf == nullptr));
fd462a61 5422
421490af
PA
5423 process_info *proc = current_process ();
5424
5425 int fd = proc->priv->mem_fd;
5426 if (fd == -1)
5427 return EIO;
5428
5429 while (len > 0)
fd462a61 5430 {
4934b29e
MR
5431 int bytes;
5432
31a56a22
PA
5433 /* Use pread64/pwrite64 if available, since they save a syscall
5434 and can handle 64-bit offsets even on 32-bit platforms (for
5435 instance, SPARC debugging a SPARC64 application). But only
5436 use them if the offset isn't so high that when cast to off_t
5437 it'd be negative, as seen on SPARC64. pread64/pwrite64
5438 outright reject such offsets. lseek does not. */
fd462a61 5439#ifdef HAVE_PREAD64
31a56a22 5440 if ((off_t) memaddr >= 0)
421490af 5441 bytes = (readbuf != nullptr
31a56a22
PA
5442 ? pread64 (fd, readbuf, len, memaddr)
5443 : pwrite64 (fd, writebuf, len, memaddr));
5444 else
fd462a61 5445#endif
31a56a22
PA
5446 {
5447 bytes = -1;
5448 if (lseek (fd, memaddr, SEEK_SET) != -1)
5449 bytes = (readbuf != nullptr
5450 ? read (fd, readbuf, len)
5451 : write (fd, writebuf, len));
5452 }
fd462a61 5453
421490af
PA
5454 if (bytes < 0)
5455 return errno;
5456 else if (bytes == 0)
4934b29e 5457 {
421490af
PA
5458 /* EOF means the address space is gone, the whole process
5459 exited or execed. */
5460 return EIO;
4934b29e 5461 }
da6d8c04 5462
421490af
PA
5463 memaddr += bytes;
5464 if (readbuf != nullptr)
5465 readbuf += bytes;
5466 else
5467 writebuf += bytes;
5468 len -= bytes;
da6d8c04
DJ
5469 }
5470
421490af
PA
5471 return 0;
5472}
c3e735a6 5473
421490af
PA
5474int
5475linux_process_target::read_memory (CORE_ADDR memaddr,
5476 unsigned char *myaddr, int len)
5477{
5478 return proc_xfer_memory (memaddr, myaddr, nullptr, len);
da6d8c04
DJ
5479}
5480
93ae6fdc
PA
5481/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5482 memory at MEMADDR. On failure (cannot write to the inferior)
f0ae6fc3 5483 returns the value of errno. Always succeeds if LEN is zero. */
da6d8c04 5484
e2558df3
TBA
5485int
5486linux_process_target::write_memory (CORE_ADDR memaddr,
5487 const unsigned char *myaddr, int len)
da6d8c04 5488{
0d62e5e8
DJ
5489 if (debug_threads)
5490 {
58d6951d 5491 /* Dump up to four bytes. */
bf47e248
PA
5492 char str[4 * 2 + 1];
5493 char *p = str;
5494 int dump = len < 4 ? len : 4;
5495
421490af 5496 for (int i = 0; i < dump; i++)
bf47e248
PA
5497 {
5498 sprintf (p, "%02x", myaddr[i]);
5499 p += 2;
5500 }
5501 *p = '\0';
5502
c058728c 5503 threads_debug_printf ("Writing %s to 0x%08lx in process %d",
421490af 5504 str, (long) memaddr, current_process ()->pid);
0d62e5e8
DJ
5505 }
5506
421490af 5507 return proc_xfer_memory (memaddr, nullptr, myaddr, len);
da6d8c04 5508}
2f2893d9 5509
2a31c7aa
TBA
5510void
5511linux_process_target::look_up_symbols ()
2f2893d9 5512{
0d62e5e8 5513#ifdef USE_THREAD_DB
95954743
PA
5514 struct process_info *proc = current_process ();
5515
fe978cb0 5516 if (proc->priv->thread_db != NULL)
0d62e5e8
DJ
5517 return;
5518
9b4c5f87 5519 thread_db_init ();
0d62e5e8
DJ
5520#endif
5521}
5522
eb497a2a
TBA
5523void
5524linux_process_target::request_interrupt ()
e5379b03 5525{
78708b7c
PA
5526 /* Send a SIGINT to the process group. This acts just like the user
5527 typed a ^C on the controlling terminal. */
4c35c4c6
TV
5528 int res = ::kill (-signal_pid, SIGINT);
5529 if (res == -1)
5530 warning (_("Sending SIGINT to process group of pid %ld failed: %s"),
5531 signal_pid, safe_strerror (errno));
e5379b03
DJ
5532}
5533
eac215cc
TBA
5534bool
5535linux_process_target::supports_read_auxv ()
5536{
5537 return true;
5538}
5539
aa691b87
RM
5540/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5541 to debugger memory starting at MYADDR. */
5542
eac215cc 5543int
43e5fbd8
TJB
5544linux_process_target::read_auxv (int pid, CORE_ADDR offset,
5545 unsigned char *myaddr, unsigned int len)
aa691b87
RM
5546{
5547 char filename[PATH_MAX];
5548 int fd, n;
5549
6cebaf6e 5550 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5551
5552 fd = open (filename, O_RDONLY);
5553 if (fd < 0)
5554 return -1;
5555
5556 if (offset != (CORE_ADDR) 0
5557 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5558 n = -1;
5559 else
5560 n = read (fd, myaddr, len);
5561
5562 close (fd);
5563
5564 return n;
5565}
5566
7e0bde70
TBA
5567int
5568linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5569 int size, raw_breakpoint *bp)
e013ee27 5570{
c8f4bfdd
YQ
5571 if (type == raw_bkpt_type_sw)
5572 return insert_memory_breakpoint (bp);
e013ee27 5573 else
9db9aa23
TBA
5574 return low_insert_point (type, addr, size, bp);
5575}
5576
5577int
5578linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5579 int size, raw_breakpoint *bp)
5580{
5581 /* Unsupported (see target.h). */
5582 return 1;
e013ee27
OF
5583}
5584
7e0bde70
TBA
5585int
5586linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5587 int size, raw_breakpoint *bp)
e013ee27 5588{
c8f4bfdd
YQ
5589 if (type == raw_bkpt_type_sw)
5590 return remove_memory_breakpoint (bp);
e013ee27 5591 else
9db9aa23
TBA
5592 return low_remove_point (type, addr, size, bp);
5593}
5594
5595int
5596linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5597 int size, raw_breakpoint *bp)
5598{
5599 /* Unsupported (see target.h). */
5600 return 1;
e013ee27
OF
5601}
5602
84320c4e 5603/* Implement the stopped_by_sw_breakpoint target_ops
3e572f71
PA
5604 method. */
5605
84320c4e
TBA
5606bool
5607linux_process_target::stopped_by_sw_breakpoint ()
3e572f71
PA
5608{
5609 struct lwp_info *lwp = get_thread_lwp (current_thread);
5610
5611 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5612}
5613
84320c4e 5614/* Implement the supports_stopped_by_sw_breakpoint target_ops
3e572f71
PA
5615 method. */
5616
84320c4e
TBA
5617bool
5618linux_process_target::supports_stopped_by_sw_breakpoint ()
3e572f71 5619{
5739a1b9 5620 return true;
3e572f71
PA
5621}
5622
93fe88b2 5623/* Implement the stopped_by_hw_breakpoint target_ops
3e572f71
PA
5624 method. */
5625
93fe88b2
TBA
5626bool
5627linux_process_target::stopped_by_hw_breakpoint ()
3e572f71
PA
5628{
5629 struct lwp_info *lwp = get_thread_lwp (current_thread);
5630
5631 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5632}
5633
93fe88b2 5634/* Implement the supports_stopped_by_hw_breakpoint target_ops
3e572f71
PA
5635 method. */
5636
93fe88b2
TBA
5637bool
5638linux_process_target::supports_stopped_by_hw_breakpoint ()
3e572f71 5639{
5739a1b9 5640 return true;
3e572f71
PA
5641}
5642
70b90b91 5643/* Implement the supports_hardware_single_step target_ops method. */
45614f15 5644
22aa6223
TBA
5645bool
5646linux_process_target::supports_hardware_single_step ()
45614f15 5647{
b31cdfa6 5648 return true;
45614f15
YQ
5649}
5650
6eeb5c55
TBA
5651bool
5652linux_process_target::stopped_by_watchpoint ()
e013ee27 5653{
0bfdf32f 5654 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c 5655
15c66dd6 5656 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
e013ee27
OF
5657}
5658
6eeb5c55
TBA
5659CORE_ADDR
5660linux_process_target::stopped_data_address ()
e013ee27 5661{
0bfdf32f 5662 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
5663
5664 return lwp->stopped_data_address;
e013ee27
OF
5665}
5666
db0dfaa0
LM
5667/* This is only used for targets that define PT_TEXT_ADDR,
5668 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5669 the target has different ways of acquiring this information, like
5670 loadmaps. */
52fb6437 5671
5203ae1e
TBA
5672bool
5673linux_process_target::supports_read_offsets ()
5674{
5675#ifdef SUPPORTS_READ_OFFSETS
5676 return true;
5677#else
5678 return false;
5679#endif
5680}
5681
52fb6437
NS
5682/* Under uClinux, programs are loaded at non-zero offsets, which we need
5683 to tell gdb about. */
5684
5203ae1e
TBA
5685int
5686linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
52fb6437 5687{
5203ae1e 5688#ifdef SUPPORTS_READ_OFFSETS
52fb6437 5689 unsigned long text, text_end, data;
62828379 5690 int pid = lwpid_of (current_thread);
52fb6437
NS
5691
5692 errno = 0;
5693
b8e1b30e
LM
5694 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5695 (PTRACE_TYPE_ARG4) 0);
5696 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5697 (PTRACE_TYPE_ARG4) 0);
5698 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5699 (PTRACE_TYPE_ARG4) 0);
52fb6437
NS
5700
5701 if (errno == 0)
5702 {
5703 /* Both text and data offsets produced at compile-time (and so
1b3f6016
PA
5704 used by gdb) are relative to the beginning of the program,
5705 with the data segment immediately following the text segment.
5706 However, the actual runtime layout in memory may put the data
5707 somewhere else, so when we send gdb a data base-address, we
5708 use the real data base address and subtract the compile-time
5709 data base-address from it (which is just the length of the
5710 text segment). BSS immediately follows data in both
5711 cases. */
52fb6437
NS
5712 *text_p = text;
5713 *data_p = data - (text_end - text);
1b3f6016 5714
52fb6437
NS
5715 return 1;
5716 }
5203ae1e
TBA
5717 return 0;
5718#else
5719 gdb_assert_not_reached ("target op read_offsets not supported");
52fb6437 5720#endif
5203ae1e 5721}
52fb6437 5722
6e3fd7e9
TBA
5723bool
5724linux_process_target::supports_get_tls_address ()
5725{
5726#ifdef USE_THREAD_DB
5727 return true;
5728#else
5729 return false;
5730#endif
5731}
5732
5733int
5734linux_process_target::get_tls_address (thread_info *thread,
5735 CORE_ADDR offset,
5736 CORE_ADDR load_module,
5737 CORE_ADDR *address)
5738{
5739#ifdef USE_THREAD_DB
5740 return thread_db_get_tls_address (thread, offset, load_module, address);
5741#else
5742 return -1;
5743#endif
5744}
5745
2d0795ee
TBA
5746bool
5747linux_process_target::supports_qxfer_osdata ()
5748{
5749 return true;
5750}
5751
5752int
5753linux_process_target::qxfer_osdata (const char *annex,
5754 unsigned char *readbuf,
5755 unsigned const char *writebuf,
5756 CORE_ADDR offset, int len)
07e059b5 5757{
d26e3629 5758 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
5759}
5760
cb63de7c
TBA
5761void
5762linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5763 gdb_byte *inf_siginfo, int direction)
d0722149 5764{
cb63de7c 5765 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
d0722149
DE
5766
5767 /* If there was no callback, or the callback didn't do anything,
5768 then just do a straight memcpy. */
5769 if (!done)
5770 {
5771 if (direction == 1)
a5362b9a 5772 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 5773 else
a5362b9a 5774 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
5775 }
5776}
5777
cb63de7c
TBA
5778bool
5779linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
5780 int direction)
5781{
5782 return false;
5783}
5784
d7abedf7
TBA
5785bool
5786linux_process_target::supports_qxfer_siginfo ()
5787{
5788 return true;
5789}
5790
5791int
5792linux_process_target::qxfer_siginfo (const char *annex,
5793 unsigned char *readbuf,
5794 unsigned const char *writebuf,
5795 CORE_ADDR offset, int len)
4aa995e1 5796{
d0722149 5797 int pid;
a5362b9a 5798 siginfo_t siginfo;
8adce034 5799 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1 5800
0bfdf32f 5801 if (current_thread == NULL)
4aa995e1
PA
5802 return -1;
5803
0bfdf32f 5804 pid = lwpid_of (current_thread);
4aa995e1 5805
c058728c
SM
5806 threads_debug_printf ("%s siginfo for lwp %d.",
5807 readbuf != NULL ? "Reading" : "Writing",
5808 pid);
4aa995e1 5809
0adea5f7 5810 if (offset >= sizeof (siginfo))
4aa995e1
PA
5811 return -1;
5812
b8e1b30e 5813 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
5814 return -1;
5815
d0722149
DE
5816 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5817 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5818 inferior with a 64-bit GDBSERVER should look the same as debugging it
5819 with a 32-bit GDBSERVER, we need to convert it. */
5820 siginfo_fixup (&siginfo, inf_siginfo, 0);
5821
4aa995e1
PA
5822 if (offset + len > sizeof (siginfo))
5823 len = sizeof (siginfo) - offset;
5824
5825 if (readbuf != NULL)
d0722149 5826 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
5827 else
5828 {
d0722149
DE
5829 memcpy (inf_siginfo + offset, writebuf, len);
5830
5831 /* Convert back to ptrace layout before flushing it out. */
5832 siginfo_fixup (&siginfo, inf_siginfo, 1);
5833
b8e1b30e 5834 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
5835 return -1;
5836 }
5837
5838 return len;
5839}
5840
bd99dc85
PA
5841/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5842 so we notice when children change state; as the handler for the
5843 sigsuspend in my_waitpid. */
5844
5845static void
5846sigchld_handler (int signo)
5847{
5848 int old_errno = errno;
5849
5850 if (debug_threads)
e581f2b4
PA
5851 {
5852 do
5853 {
a7e559cc
AH
5854 /* Use the async signal safe debug function. */
5855 if (debug_write ("sigchld_handler\n",
5856 sizeof ("sigchld_handler\n") - 1) < 0)
e581f2b4
PA
5857 break; /* just ignore */
5858 } while (0);
5859 }
bd99dc85
PA
5860
5861 if (target_is_async_p ())
5862 async_file_mark (); /* trigger a linux_wait */
5863
5864 errno = old_errno;
5865}
5866
0dc587d4
TBA
5867bool
5868linux_process_target::supports_non_stop ()
bd99dc85 5869{
0dc587d4 5870 return true;
bd99dc85
PA
5871}
5872
0dc587d4
TBA
5873bool
5874linux_process_target::async (bool enable)
bd99dc85 5875{
0dc587d4 5876 bool previous = target_is_async_p ();
bd99dc85 5877
c058728c
SM
5878 threads_debug_printf ("async (%d), previous=%d",
5879 enable, previous);
8336d594 5880
bd99dc85
PA
5881 if (previous != enable)
5882 {
5883 sigset_t mask;
5884 sigemptyset (&mask);
5885 sigaddset (&mask, SIGCHLD);
5886
21987b9c 5887 gdb_sigmask (SIG_BLOCK, &mask, NULL);
bd99dc85
PA
5888
5889 if (enable)
5890 {
8674f082 5891 if (!linux_event_pipe.open_pipe ())
aa96c426 5892 {
21987b9c 5893 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
aa96c426
GB
5894
5895 warning ("creating event pipe failed.");
5896 return previous;
5897 }
bd99dc85 5898
bd99dc85 5899 /* Register the event loop handler. */
cdc8e9b2 5900 add_file_handler (linux_event_pipe.event_fd (),
2554f6f5
SM
5901 handle_target_event, NULL,
5902 "linux-low");
bd99dc85
PA
5903
5904 /* Always trigger a linux_wait. */
5905 async_file_mark ();
5906 }
5907 else
5908 {
cdc8e9b2 5909 delete_file_handler (linux_event_pipe.event_fd ());
bd99dc85 5910
8674f082 5911 linux_event_pipe.close_pipe ();
bd99dc85
PA
5912 }
5913
21987b9c 5914 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
bd99dc85
PA
5915 }
5916
5917 return previous;
5918}
5919
0dc587d4
TBA
5920int
5921linux_process_target::start_non_stop (bool nonstop)
bd99dc85
PA
5922{
5923 /* Register or unregister from event-loop accordingly. */
0dc587d4 5924 target_async (nonstop);
aa96c426 5925
0dc587d4 5926 if (target_is_async_p () != (nonstop != false))
aa96c426
GB
5927 return -1;
5928
bd99dc85
PA
5929 return 0;
5930}
5931
652aef77
TBA
5932bool
5933linux_process_target::supports_multi_process ()
cf8fd78b 5934{
652aef77 5935 return true;
cf8fd78b
PA
5936}
5937
89245bc0
DB
5938/* Check if fork events are supported. */
5939
9690a72a
TBA
5940bool
5941linux_process_target::supports_fork_events ()
89245bc0 5942{
a2885186 5943 return true;
89245bc0
DB
5944}
5945
5946/* Check if vfork events are supported. */
5947
9690a72a
TBA
5948bool
5949linux_process_target::supports_vfork_events ()
89245bc0 5950{
a2885186 5951 return true;
89245bc0
DB
5952}
5953
393a6b59
PA
5954/* Return the set of supported thread options. */
5955
5956gdb_thread_options
5957linux_process_target::supported_thread_options ()
5958{
48989498 5959 return GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
393a6b59
PA
5960}
5961
94585166
DB
5962/* Check if exec events are supported. */
5963
9690a72a
TBA
5964bool
5965linux_process_target::supports_exec_events ()
94585166 5966{
a2885186 5967 return true;
94585166
DB
5968}
5969
de0d863e
DB
5970/* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5971 ptrace flags for all inferiors. This is in case the new GDB connection
5972 doesn't support the same set of events that the previous one did. */
5973
fb00dfce
TBA
5974void
5975linux_process_target::handle_new_gdb_connection ()
de0d863e 5976{
de0d863e 5977 /* Request that all the lwps reset their ptrace options. */
bbf550d5
SM
5978 for_each_thread ([] (thread_info *thread)
5979 {
5980 struct lwp_info *lwp = get_thread_lwp (thread);
5981
5982 if (!lwp->stopped)
5983 {
5984 /* Stop the lwp so we can modify its ptrace options. */
5985 lwp->must_set_ptrace_flags = 1;
5986 linux_stop_lwp (lwp);
5987 }
5988 else
5989 {
5990 /* Already stopped; go ahead and set the ptrace options. */
5991 struct process_info *proc = find_process_pid (pid_of (thread));
5992 int options = linux_low_ptrace_options (proc->attached);
5993
5994 linux_enable_event_reporting (lwpid_of (thread), options);
5995 lwp->must_set_ptrace_flags = 0;
5996 }
5997 });
de0d863e
DB
5998}
5999
55cf3021
TBA
6000int
6001linux_process_target::handle_monitor_command (char *mon)
6002{
6003#ifdef USE_THREAD_DB
6004 return thread_db_handle_monitor_command (mon);
6005#else
6006 return 0;
6007#endif
6008}
6009
95a45fc1
TBA
6010int
6011linux_process_target::core_of_thread (ptid_t ptid)
6012{
6013 return linux_common_core_of_thread (ptid);
6014}
6015
c756403b
TBA
6016bool
6017linux_process_target::supports_disable_randomization ()
03583c20 6018{
c756403b 6019 return true;
03583c20 6020}
efcbbd14 6021
c0245cb9
TBA
6022bool
6023linux_process_target::supports_agent ()
d1feda86 6024{
c0245cb9 6025 return true;
d1feda86
YQ
6026}
6027
2526e0cd
TBA
6028bool
6029linux_process_target::supports_range_stepping ()
c2d6af84 6030{
7582c77c 6031 if (supports_software_single_step ())
2526e0cd 6032 return true;
c2d6af84 6033
9cfd8715
TBA
6034 return low_supports_range_stepping ();
6035}
6036
6037bool
6038linux_process_target::low_supports_range_stepping ()
6039{
6040 return false;
c2d6af84
PA
6041}
6042
8247b823
TBA
6043bool
6044linux_process_target::supports_pid_to_exec_file ()
6045{
6046 return true;
6047}
6048
04977957 6049const char *
8247b823
TBA
6050linux_process_target::pid_to_exec_file (int pid)
6051{
6052 return linux_proc_pid_to_exec_file (pid);
6053}
6054
c9b7b804
TBA
6055bool
6056linux_process_target::supports_multifs ()
6057{
6058 return true;
6059}
6060
6061int
6062linux_process_target::multifs_open (int pid, const char *filename,
6063 int flags, mode_t mode)
6064{
6065 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6066}
6067
6068int
6069linux_process_target::multifs_unlink (int pid, const char *filename)
6070{
6071 return linux_mntns_unlink (pid, filename);
6072}
6073
6074ssize_t
6075linux_process_target::multifs_readlink (int pid, const char *filename,
6076 char *buf, size_t bufsiz)
6077{
6078 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6079}
6080
723b724b 6081#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
78d85199
YQ
6082struct target_loadseg
6083{
6084 /* Core address to which the segment is mapped. */
6085 Elf32_Addr addr;
6086 /* VMA recorded in the program header. */
6087 Elf32_Addr p_vaddr;
6088 /* Size of this segment in memory. */
6089 Elf32_Word p_memsz;
6090};
6091
723b724b 6092# if defined PT_GETDSBT
78d85199
YQ
6093struct target_loadmap
6094{
6095 /* Protocol version number, must be zero. */
6096 Elf32_Word version;
6097 /* Pointer to the DSBT table, its size, and the DSBT index. */
6098 unsigned *dsbt_table;
6099 unsigned dsbt_size, dsbt_index;
6100 /* Number of segments in this map. */
6101 Elf32_Word nsegs;
6102 /* The actual memory map. */
6103 struct target_loadseg segs[/*nsegs*/];
6104};
723b724b
MF
6105# define LINUX_LOADMAP PT_GETDSBT
6106# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6107# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6108# else
6109struct target_loadmap
6110{
6111 /* Protocol version number, must be zero. */
6112 Elf32_Half version;
6113 /* Number of segments in this map. */
6114 Elf32_Half nsegs;
6115 /* The actual memory map. */
6116 struct target_loadseg segs[/*nsegs*/];
6117};
6118# define LINUX_LOADMAP PTRACE_GETFDPIC
6119# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6120# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6121# endif
78d85199 6122
9da41fda
TBA
6123bool
6124linux_process_target::supports_read_loadmap ()
6125{
6126 return true;
6127}
6128
6129int
6130linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6131 unsigned char *myaddr, unsigned int len)
78d85199 6132{
0bfdf32f 6133 int pid = lwpid_of (current_thread);
78d85199
YQ
6134 int addr = -1;
6135 struct target_loadmap *data = NULL;
6136 unsigned int actual_length, copy_length;
6137
6138 if (strcmp (annex, "exec") == 0)
723b724b 6139 addr = (int) LINUX_LOADMAP_EXEC;
78d85199 6140 else if (strcmp (annex, "interp") == 0)
723b724b 6141 addr = (int) LINUX_LOADMAP_INTERP;
78d85199
YQ
6142 else
6143 return -1;
6144
723b724b 6145 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
78d85199
YQ
6146 return -1;
6147
6148 if (data == NULL)
6149 return -1;
6150
6151 actual_length = sizeof (struct target_loadmap)
6152 + sizeof (struct target_loadseg) * data->nsegs;
6153
6154 if (offset < 0 || offset > actual_length)
6155 return -1;
6156
6157 copy_length = actual_length - offset < len ? actual_length - offset : len;
6158 memcpy (myaddr, (char *) data + offset, copy_length);
6159 return copy_length;
6160}
723b724b 6161#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6162
bc8d3ae4
TBA
6163bool
6164linux_process_target::supports_catch_syscall ()
82075af2 6165{
a2885186 6166 return low_supports_catch_syscall ();
82075af2
JS
6167}
6168
9eedd27d
TBA
6169bool
6170linux_process_target::low_supports_catch_syscall ()
6171{
6172 return false;
6173}
6174
770d8f6a
TBA
6175CORE_ADDR
6176linux_process_target::read_pc (regcache *regcache)
219f2f23 6177{
bf9ae9d8 6178 if (!low_supports_breakpoints ())
219f2f23
PA
6179 return 0;
6180
bf9ae9d8 6181 return low_get_pc (regcache);
219f2f23
PA
6182}
6183
770d8f6a
TBA
6184void
6185linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
219f2f23 6186{
bf9ae9d8 6187 gdb_assert (low_supports_breakpoints ());
219f2f23 6188
bf9ae9d8 6189 low_set_pc (regcache, pc);
219f2f23
PA
6190}
6191
68119632
TBA
6192bool
6193linux_process_target::supports_thread_stopped ()
6194{
6195 return true;
6196}
6197
6198bool
6199linux_process_target::thread_stopped (thread_info *thread)
8336d594
PA
6200{
6201 return get_thread_lwp (thread)->stopped;
6202}
6203
ef980d65
PA
6204bool
6205linux_process_target::any_resumed ()
6206{
6207 bool any_resumed;
6208
6209 auto status_pending_p_any = [&] (thread_info *thread)
6210 {
6211 return status_pending_p_callback (thread, minus_one_ptid);
6212 };
6213
6214 auto not_stopped = [&] (thread_info *thread)
6215 {
6216 return not_stopped_callback (thread, minus_one_ptid);
6217 };
6218
6219 /* Find a resumed LWP, if any. */
6220 if (find_thread (status_pending_p_any) != NULL)
6221 any_resumed = 1;
6222 else if (find_thread (not_stopped) != NULL)
6223 any_resumed = 1;
6224 else
6225 any_resumed = 0;
6226
6227 return any_resumed;
6228}
6229
8336d594
PA
6230/* This exposes stop-all-threads functionality to other modules. */
6231
29e8dc09
TBA
6232void
6233linux_process_target::pause_all (bool freeze)
8336d594 6234{
7984d532
PA
6235 stop_all_lwps (freeze, NULL);
6236}
6237
6238/* This exposes unstop-all-threads functionality to other gdbserver
6239 modules. */
6240
29e8dc09
TBA
6241void
6242linux_process_target::unpause_all (bool unfreeze)
7984d532
PA
6243{
6244 unstop_all_lwps (unfreeze, NULL);
8336d594
PA
6245}
6246
2268b414
JK
6247/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6248
6249static int
6250get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6251 CORE_ADDR *phdr_memaddr, int *num_phdr)
6252{
6253 char filename[PATH_MAX];
6254 int fd;
6255 const int auxv_size = is_elf64
6256 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6257 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6258
6259 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6260
6261 fd = open (filename, O_RDONLY);
6262 if (fd < 0)
6263 return 1;
6264
6265 *phdr_memaddr = 0;
6266 *num_phdr = 0;
6267 while (read (fd, buf, auxv_size) == auxv_size
6268 && (*phdr_memaddr == 0 || *num_phdr == 0))
6269 {
6270 if (is_elf64)
6271 {
6272 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6273
6274 switch (aux->a_type)
6275 {
6276 case AT_PHDR:
6277 *phdr_memaddr = aux->a_un.a_val;
6278 break;
6279 case AT_PHNUM:
6280 *num_phdr = aux->a_un.a_val;
6281 break;
6282 }
6283 }
6284 else
6285 {
6286 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6287
6288 switch (aux->a_type)
6289 {
6290 case AT_PHDR:
6291 *phdr_memaddr = aux->a_un.a_val;
6292 break;
6293 case AT_PHNUM:
6294 *num_phdr = aux->a_un.a_val;
6295 break;
6296 }
6297 }
6298 }
6299
6300 close (fd);
6301
6302 if (*phdr_memaddr == 0 || *num_phdr == 0)
6303 {
6304 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6305 "phdr_memaddr = %ld, phdr_num = %d",
6306 (long) *phdr_memaddr, *num_phdr);
6307 return 2;
6308 }
6309
6310 return 0;
6311}
6312
6313/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6314
6315static CORE_ADDR
6316get_dynamic (const int pid, const int is_elf64)
6317{
6318 CORE_ADDR phdr_memaddr, relocation;
db1ff28b 6319 int num_phdr, i;
2268b414 6320 unsigned char *phdr_buf;
db1ff28b 6321 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
2268b414
JK
6322
6323 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6324 return 0;
6325
6326 gdb_assert (num_phdr < 100); /* Basic sanity check. */
224c3ddb 6327 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
2268b414
JK
6328
6329 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6330 return 0;
6331
6332 /* Compute relocation: it is expected to be 0 for "regular" executables,
6333 non-zero for PIE ones. */
6334 relocation = -1;
db1ff28b
JK
6335 for (i = 0; relocation == -1 && i < num_phdr; i++)
6336 if (is_elf64)
6337 {
6338 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6339
6340 if (p->p_type == PT_PHDR)
6341 relocation = phdr_memaddr - p->p_vaddr;
6342 }
6343 else
6344 {
6345 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6346
6347 if (p->p_type == PT_PHDR)
6348 relocation = phdr_memaddr - p->p_vaddr;
6349 }
6350
2268b414
JK
6351 if (relocation == -1)
6352 {
e237a7e2
JK
6353 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6354 any real world executables, including PIE executables, have always
6355 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6356 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6357 or present DT_DEBUG anyway (fpc binaries are statically linked).
6358
6359 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6360
6361 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6362
2268b414
JK
6363 return 0;
6364 }
6365
db1ff28b
JK
6366 for (i = 0; i < num_phdr; i++)
6367 {
6368 if (is_elf64)
6369 {
6370 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6371
6372 if (p->p_type == PT_DYNAMIC)
6373 return p->p_vaddr + relocation;
6374 }
6375 else
6376 {
6377 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
2268b414 6378
db1ff28b
JK
6379 if (p->p_type == PT_DYNAMIC)
6380 return p->p_vaddr + relocation;
6381 }
6382 }
2268b414
JK
6383
6384 return 0;
6385}
6386
6387/* Return &_r_debug in the inferior, or -1 if not present. Return value
367ba2c2
MR
6388 can be 0 if the inferior does not yet have the library list initialized.
6389 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6390 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
2268b414
JK
6391
6392static CORE_ADDR
6393get_r_debug (const int pid, const int is_elf64)
6394{
6395 CORE_ADDR dynamic_memaddr;
6396 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6397 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
367ba2c2 6398 CORE_ADDR map = -1;
2268b414
JK
6399
6400 dynamic_memaddr = get_dynamic (pid, is_elf64);
6401 if (dynamic_memaddr == 0)
367ba2c2 6402 return map;
2268b414
JK
6403
6404 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6405 {
6406 if (is_elf64)
6407 {
6408 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
a738da3a 6409#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
367ba2c2
MR
6410 union
6411 {
6412 Elf64_Xword map;
6413 unsigned char buf[sizeof (Elf64_Xword)];
6414 }
6415 rld_map;
a738da3a
MF
6416#endif
6417#ifdef DT_MIPS_RLD_MAP
367ba2c2
MR
6418 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6419 {
6420 if (linux_read_memory (dyn->d_un.d_val,
6421 rld_map.buf, sizeof (rld_map.buf)) == 0)
6422 return rld_map.map;
6423 else
6424 break;
6425 }
75f62ce7 6426#endif /* DT_MIPS_RLD_MAP */
a738da3a
MF
6427#ifdef DT_MIPS_RLD_MAP_REL
6428 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6429 {
6430 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6431 rld_map.buf, sizeof (rld_map.buf)) == 0)
6432 return rld_map.map;
6433 else
6434 break;
6435 }
6436#endif /* DT_MIPS_RLD_MAP_REL */
2268b414 6437
367ba2c2
MR
6438 if (dyn->d_tag == DT_DEBUG && map == -1)
6439 map = dyn->d_un.d_val;
2268b414
JK
6440
6441 if (dyn->d_tag == DT_NULL)
6442 break;
6443 }
6444 else
6445 {
6446 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
a738da3a 6447#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
367ba2c2
MR
6448 union
6449 {
6450 Elf32_Word map;
6451 unsigned char buf[sizeof (Elf32_Word)];
6452 }
6453 rld_map;
a738da3a
MF
6454#endif
6455#ifdef DT_MIPS_RLD_MAP
367ba2c2
MR
6456 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6457 {
6458 if (linux_read_memory (dyn->d_un.d_val,
6459 rld_map.buf, sizeof (rld_map.buf)) == 0)
6460 return rld_map.map;
6461 else
6462 break;
6463 }
75f62ce7 6464#endif /* DT_MIPS_RLD_MAP */
a738da3a
MF
6465#ifdef DT_MIPS_RLD_MAP_REL
6466 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6467 {
6468 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6469 rld_map.buf, sizeof (rld_map.buf)) == 0)
6470 return rld_map.map;
6471 else
6472 break;
6473 }
6474#endif /* DT_MIPS_RLD_MAP_REL */
2268b414 6475
367ba2c2
MR
6476 if (dyn->d_tag == DT_DEBUG && map == -1)
6477 map = dyn->d_un.d_val;
2268b414
JK
6478
6479 if (dyn->d_tag == DT_NULL)
6480 break;
6481 }
6482
6483 dynamic_memaddr += dyn_size;
6484 }
6485
367ba2c2 6486 return map;
2268b414
JK
6487}
6488
6489/* Read one pointer from MEMADDR in the inferior. */
6490
6491static int
6492read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6493{
485f1ee4
PA
6494 int ret;
6495
6496 /* Go through a union so this works on either big or little endian
6497 hosts, when the inferior's pointer size is smaller than the size
6498 of CORE_ADDR. It is assumed the inferior's endianness is the
6499 same of the superior's. */
6500 union
6501 {
6502 CORE_ADDR core_addr;
6503 unsigned int ui;
6504 unsigned char uc;
6505 } addr;
6506
6507 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6508 if (ret == 0)
6509 {
6510 if (ptr_size == sizeof (CORE_ADDR))
6511 *ptr = addr.core_addr;
6512 else if (ptr_size == sizeof (unsigned int))
6513 *ptr = addr.ui;
6514 else
6515 gdb_assert_not_reached ("unhandled pointer size");
6516 }
6517 return ret;
2268b414
JK
6518}
6519
974387bb
TBA
6520bool
6521linux_process_target::supports_qxfer_libraries_svr4 ()
6522{
6523 return true;
6524}
6525
2268b414
JK
6526struct link_map_offsets
6527 {
6528 /* Offset and size of r_debug.r_version. */
6529 int r_version_offset;
6530
6531 /* Offset and size of r_debug.r_map. */
6532 int r_map_offset;
6533
8d56636a
MM
6534 /* Offset of r_debug_extended.r_next. */
6535 int r_next_offset;
6536
2268b414
JK
6537 /* Offset to l_addr field in struct link_map. */
6538 int l_addr_offset;
6539
6540 /* Offset to l_name field in struct link_map. */
6541 int l_name_offset;
6542
6543 /* Offset to l_ld field in struct link_map. */
6544 int l_ld_offset;
6545
6546 /* Offset to l_next field in struct link_map. */
6547 int l_next_offset;
6548
6549 /* Offset to l_prev field in struct link_map. */
6550 int l_prev_offset;
6551 };
6552
8d56636a
MM
6553static const link_map_offsets lmo_32bit_offsets =
6554 {
6555 0, /* r_version offset. */
6556 4, /* r_debug.r_map offset. */
6557 20, /* r_debug_extended.r_next. */
6558 0, /* l_addr offset in link_map. */
6559 4, /* l_name offset in link_map. */
6560 8, /* l_ld offset in link_map. */
6561 12, /* l_next offset in link_map. */
6562 16 /* l_prev offset in link_map. */
6563 };
6564
6565static const link_map_offsets lmo_64bit_offsets =
6566 {
6567 0, /* r_version offset. */
6568 8, /* r_debug.r_map offset. */
6569 40, /* r_debug_extended.r_next. */
6570 0, /* l_addr offset in link_map. */
6571 8, /* l_name offset in link_map. */
6572 16, /* l_ld offset in link_map. */
6573 24, /* l_next offset in link_map. */
6574 32 /* l_prev offset in link_map. */
6575 };
6576
6577/* Get the loaded shared libraries from one namespace. */
6578
6579static void
2733d9d5
MM
6580read_link_map (std::string &document, CORE_ADDR lmid, CORE_ADDR lm_addr,
6581 CORE_ADDR lm_prev, int ptr_size, const link_map_offsets *lmo)
8d56636a
MM
6582{
6583 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6584
6585 while (lm_addr
6586 && read_one_ptr (lm_addr + lmo->l_name_offset,
6587 &l_name, ptr_size) == 0
6588 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6589 &l_addr, ptr_size) == 0
6590 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6591 &l_ld, ptr_size) == 0
6592 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6593 &l_prev, ptr_size) == 0
6594 && read_one_ptr (lm_addr + lmo->l_next_offset,
6595 &l_next, ptr_size) == 0)
6596 {
6597 unsigned char libname[PATH_MAX];
6598
6599 if (lm_prev != l_prev)
6600 {
6601 warning ("Corrupted shared library list: 0x%s != 0x%s",
6602 paddress (lm_prev), paddress (l_prev));
6603 break;
6604 }
6605
ad10f44e
MM
6606 /* Not checking for error because reading may stop before we've got
6607 PATH_MAX worth of characters. */
6608 libname[0] = '\0';
6609 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6610 libname[sizeof (libname) - 1] = '\0';
6611 if (libname[0] != '\0')
8d56636a 6612 {
ad10f44e 6613 string_appendf (document, "<library name=\"");
de75275f 6614 xml_escape_text_append (document, (char *) libname);
ad10f44e 6615 string_appendf (document, "\" lm=\"0x%s\" l_addr=\"0x%s\" "
2733d9d5 6616 "l_ld=\"0x%s\" lmid=\"0x%s\"/>",
ad10f44e 6617 paddress (lm_addr), paddress (l_addr),
2733d9d5 6618 paddress (l_ld), paddress (lmid));
8d56636a
MM
6619 }
6620
6621 lm_prev = lm_addr;
6622 lm_addr = l_next;
6623 }
6624}
6625
fb723180 6626/* Construct qXfer:libraries-svr4:read reply. */
2268b414 6627
974387bb
TBA
6628int
6629linux_process_target::qxfer_libraries_svr4 (const char *annex,
6630 unsigned char *readbuf,
6631 unsigned const char *writebuf,
6632 CORE_ADDR offset, int len)
2268b414 6633{
fe978cb0 6634 struct process_info_private *const priv = current_process ()->priv;
2268b414
JK
6635 char filename[PATH_MAX];
6636 int pid, is_elf64;
214d508e 6637 unsigned int machine;
2733d9d5 6638 CORE_ADDR lmid = 0, lm_addr = 0, lm_prev = 0;
2268b414
JK
6639
6640 if (writebuf != NULL)
6641 return -2;
6642 if (readbuf == NULL)
6643 return -1;
6644
0bfdf32f 6645 pid = lwpid_of (current_thread);
2268b414 6646 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
214d508e 6647 is_elf64 = elf_64_file_p (filename, &machine);
8d56636a
MM
6648 const link_map_offsets *lmo;
6649 int ptr_size;
6650 if (is_elf64)
6651 {
6652 lmo = &lmo_64bit_offsets;
6653 ptr_size = 8;
6654 }
6655 else
6656 {
6657 lmo = &lmo_32bit_offsets;
6658 ptr_size = 4;
6659 }
2268b414 6660
b1fbec62
GB
6661 while (annex[0] != '\0')
6662 {
6663 const char *sep;
6664 CORE_ADDR *addrp;
da4ae14a 6665 int name_len;
2268b414 6666
b1fbec62
GB
6667 sep = strchr (annex, '=');
6668 if (sep == NULL)
6669 break;
0c5bf5a9 6670
da4ae14a 6671 name_len = sep - annex;
2733d9d5
MM
6672 if (name_len == 4 && startswith (annex, "lmid"))
6673 addrp = &lmid;
6674 else if (name_len == 5 && startswith (annex, "start"))
b1fbec62 6675 addrp = &lm_addr;
da4ae14a 6676 else if (name_len == 4 && startswith (annex, "prev"))
b1fbec62
GB
6677 addrp = &lm_prev;
6678 else
6679 {
6680 annex = strchr (sep, ';');
6681 if (annex == NULL)
6682 break;
6683 annex++;
6684 continue;
6685 }
6686
6687 annex = decode_address_to_semicolon (addrp, sep + 1);
2268b414 6688 }
b1fbec62 6689
8d56636a
MM
6690 std::string document = "<library-list-svr4 version=\"1.0\"";
6691
6692 /* When the starting LM_ADDR is passed in the annex, only traverse that
2733d9d5 6693 namespace, which is assumed to be identified by LMID.
8d56636a
MM
6694
6695 Otherwise, start with R_DEBUG and traverse all namespaces we find. */
6696 if (lm_addr != 0)
ad10f44e
MM
6697 {
6698 document += ">";
2733d9d5 6699 read_link_map (document, lmid, lm_addr, lm_prev, ptr_size, lmo);
ad10f44e 6700 }
8d56636a 6701 else
2268b414 6702 {
8d56636a
MM
6703 if (lm_prev != 0)
6704 warning ("ignoring prev=0x%s without start", paddress (lm_prev));
b1fbec62 6705
2733d9d5
MM
6706 /* We could interpret LMID as 'provide only the libraries for this
6707 namespace' but GDB is currently only providing lmid, start, and
6708 prev, or nothing. */
6709 if (lmid != 0)
6710 warning ("ignoring lmid=0x%s without start", paddress (lmid));
6711
8d56636a
MM
6712 CORE_ADDR r_debug = priv->r_debug;
6713 if (r_debug == 0)
6714 r_debug = priv->r_debug = get_r_debug (pid, is_elf64);
b1fbec62
GB
6715
6716 /* We failed to find DT_DEBUG. Such situation will not change
6717 for this inferior - do not retry it. Report it to GDB as
6718 E01, see for the reasons at the GDB solib-svr4.c side. */
8d56636a 6719 if (r_debug == (CORE_ADDR) -1)
b1fbec62
GB
6720 return -1;
6721
ad10f44e
MM
6722 /* Terminate the header if we end up with an empty list. */
6723 if (r_debug == 0)
6724 document += ">";
6725
8d56636a 6726 while (r_debug != 0)
2268b414 6727 {
8d56636a
MM
6728 int r_version = 0;
6729 if (linux_read_memory (r_debug + lmo->r_version_offset,
b1fbec62 6730 (unsigned char *) &r_version,
8d56636a
MM
6731 sizeof (r_version)) != 0)
6732 {
6733 warning ("unable to read r_version from 0x%s",
6734 paddress (r_debug + lmo->r_version_offset));
6735 break;
6736 }
6737
6738 if (r_version < 1)
b1fbec62
GB
6739 {
6740 warning ("unexpected r_debug version %d", r_version);
8d56636a 6741 break;
b1fbec62 6742 }
8d56636a
MM
6743
6744 if (read_one_ptr (r_debug + lmo->r_map_offset, &lm_addr,
6745 ptr_size) != 0)
b1fbec62 6746 {
8d56636a
MM
6747 warning ("unable to read r_map from 0x%s",
6748 paddress (r_debug + lmo->r_map_offset));
6749 break;
b1fbec62 6750 }
2268b414 6751
ad10f44e
MM
6752 /* We read the entire namespace. */
6753 lm_prev = 0;
6754
6755 /* The first entry corresponds to the main executable unless the
6756 dynamic loader was loaded late by a static executable. But
6757 in such case the main executable does not have PT_DYNAMIC
6758 present and we would not have gotten here. */
6759 if (r_debug == priv->r_debug)
6760 {
6761 if (lm_addr != 0)
6762 string_appendf (document, " main-lm=\"0x%s\">",
6763 paddress (lm_addr));
6764 else
6765 document += ">";
6766
6767 lm_prev = lm_addr;
6768 if (read_one_ptr (lm_addr + lmo->l_next_offset,
6769 &lm_addr, ptr_size) != 0)
6770 {
6771 warning ("unable to read l_next from 0x%s",
6772 paddress (lm_addr + lmo->l_next_offset));
6773 break;
6774 }
6775 }
6776
2733d9d5 6777 read_link_map (document, r_debug, lm_addr, lm_prev, ptr_size, lmo);
b1fbec62 6778
8d56636a
MM
6779 if (r_version < 2)
6780 break;
b1fbec62 6781
8d56636a
MM
6782 if (read_one_ptr (r_debug + lmo->r_next_offset, &r_debug,
6783 ptr_size) != 0)
2268b414 6784 {
8d56636a
MM
6785 warning ("unable to read r_next from 0x%s",
6786 paddress (r_debug + lmo->r_next_offset));
6787 break;
d878444c 6788 }
0afae3cf 6789 }
2268b414
JK
6790 }
6791
ad10f44e 6792 document += "</library-list-svr4>";
b1fbec62 6793
f6e8a41e 6794 int document_len = document.length ();
2268b414
JK
6795 if (offset < document_len)
6796 document_len -= offset;
6797 else
6798 document_len = 0;
6799 if (len > document_len)
6800 len = document_len;
6801
f6e8a41e 6802 memcpy (readbuf, document.data () + offset, len);
2268b414
JK
6803
6804 return len;
6805}
6806
9accd112
MM
6807#ifdef HAVE_LINUX_BTRACE
6808
8263b346
TBA
6809bool
6810linux_process_target::supports_btrace ()
6811{
6812 return true;
6813}
6814
79597bdd 6815btrace_target_info *
696c0d5e 6816linux_process_target::enable_btrace (thread_info *tp,
79597bdd
TBA
6817 const btrace_config *conf)
6818{
696c0d5e 6819 return linux_enable_btrace (tp->id, conf);
79597bdd
TBA
6820}
6821
969c39fb 6822/* See to_disable_btrace target method. */
9accd112 6823
79597bdd
TBA
6824int
6825linux_process_target::disable_btrace (btrace_target_info *tinfo)
969c39fb
MM
6826{
6827 enum btrace_error err;
6828
6829 err = linux_disable_btrace (tinfo);
6830 return (err == BTRACE_ERR_NONE ? 0 : -1);
6831}
6832
bc504a31 6833/* Encode an Intel Processor Trace configuration. */
b20a6524
MM
6834
6835static void
873a185b 6836linux_low_encode_pt_config (std::string *buffer,
b20a6524
MM
6837 const struct btrace_data_pt_config *config)
6838{
873a185b 6839 *buffer += "<pt-config>\n";
b20a6524
MM
6840
6841 switch (config->cpu.vendor)
6842 {
6843 case CV_INTEL:
873a185b
TT
6844 string_xml_appendf (*buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6845 "model=\"%u\" stepping=\"%u\"/>\n",
6846 config->cpu.family, config->cpu.model,
6847 config->cpu.stepping);
b20a6524
MM
6848 break;
6849
6850 default:
6851 break;
6852 }
6853
873a185b 6854 *buffer += "</pt-config>\n";
b20a6524
MM
6855}
6856
6857/* Encode a raw buffer. */
6858
6859static void
873a185b 6860linux_low_encode_raw (std::string *buffer, const gdb_byte *data,
b20a6524
MM
6861 unsigned int size)
6862{
6863 if (size == 0)
6864 return;
6865
268a13a5 6866 /* We use hex encoding - see gdbsupport/rsp-low.h. */
873a185b 6867 *buffer += "<raw>\n";
b20a6524
MM
6868
6869 while (size-- > 0)
6870 {
6871 char elem[2];
6872
6873 elem[0] = tohex ((*data >> 4) & 0xf);
6874 elem[1] = tohex (*data++ & 0xf);
6875
8b2d5ef8 6876 buffer->append (elem, 2);
b20a6524
MM
6877 }
6878
873a185b 6879 *buffer += "</raw>\n";
b20a6524
MM
6880}
6881
969c39fb
MM
6882/* See to_read_btrace target method. */
6883
79597bdd
TBA
6884int
6885linux_process_target::read_btrace (btrace_target_info *tinfo,
873a185b 6886 std::string *buffer,
79597bdd 6887 enum btrace_read_type type)
9accd112 6888{
734b0e4b 6889 struct btrace_data btrace;
969c39fb 6890 enum btrace_error err;
9accd112 6891
969c39fb
MM
6892 err = linux_read_btrace (&btrace, tinfo, type);
6893 if (err != BTRACE_ERR_NONE)
6894 {
6895 if (err == BTRACE_ERR_OVERFLOW)
873a185b 6896 *buffer += "E.Overflow.";
969c39fb 6897 else
873a185b 6898 *buffer += "E.Generic Error.";
969c39fb 6899
8dcc53b3 6900 return -1;
969c39fb 6901 }
9accd112 6902
734b0e4b
MM
6903 switch (btrace.format)
6904 {
6905 case BTRACE_FORMAT_NONE:
873a185b 6906 *buffer += "E.No Trace.";
8dcc53b3 6907 return -1;
734b0e4b
MM
6908
6909 case BTRACE_FORMAT_BTS:
873a185b
TT
6910 *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
6911 *buffer += "<btrace version=\"1.0\">\n";
9accd112 6912
46f29a9a 6913 for (const btrace_block &block : *btrace.variant.bts.blocks)
873a185b
TT
6914 string_xml_appendf (*buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6915 paddress (block.begin), paddress (block.end));
9accd112 6916
873a185b 6917 *buffer += "</btrace>\n";
734b0e4b
MM
6918 break;
6919
b20a6524 6920 case BTRACE_FORMAT_PT:
873a185b
TT
6921 *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
6922 *buffer += "<btrace version=\"1.0\">\n";
6923 *buffer += "<pt>\n";
b20a6524
MM
6924
6925 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
9accd112 6926
b20a6524
MM
6927 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6928 btrace.variant.pt.size);
6929
873a185b
TT
6930 *buffer += "</pt>\n";
6931 *buffer += "</btrace>\n";
b20a6524
MM
6932 break;
6933
6934 default:
873a185b 6935 *buffer += "E.Unsupported Trace Format.";
8dcc53b3 6936 return -1;
734b0e4b 6937 }
969c39fb
MM
6938
6939 return 0;
9accd112 6940}
f4abbc16
MM
6941
6942/* See to_btrace_conf target method. */
6943
79597bdd
TBA
6944int
6945linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
873a185b 6946 std::string *buffer)
f4abbc16
MM
6947{
6948 const struct btrace_config *conf;
6949
873a185b
TT
6950 *buffer += "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n";
6951 *buffer += "<btrace-conf version=\"1.0\">\n";
f4abbc16
MM
6952
6953 conf = linux_btrace_conf (tinfo);
6954 if (conf != NULL)
6955 {
6956 switch (conf->format)
6957 {
6958 case BTRACE_FORMAT_NONE:
6959 break;
6960
6961 case BTRACE_FORMAT_BTS:
873a185b
TT
6962 string_xml_appendf (*buffer, "<bts");
6963 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->bts.size);
6964 string_xml_appendf (*buffer, " />\n");
f4abbc16 6965 break;
b20a6524
MM
6966
6967 case BTRACE_FORMAT_PT:
873a185b
TT
6968 string_xml_appendf (*buffer, "<pt");
6969 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->pt.size);
6970 string_xml_appendf (*buffer, "/>\n");
b20a6524 6971 break;
f4abbc16
MM
6972 }
6973 }
6974
873a185b 6975 *buffer += "</btrace-conf>\n";
f4abbc16
MM
6976 return 0;
6977}
9accd112
MM
6978#endif /* HAVE_LINUX_BTRACE */
6979
7b669087
GB
6980/* See nat/linux-nat.h. */
6981
6982ptid_t
6983current_lwp_ptid (void)
6984{
6985 return ptid_of (current_thread);
6986}
6987
07b3255c 6988/* A helper function that copies NAME to DEST, replacing non-printable
862180a2
TT
6989 characters with '?'. Returns the original DEST as a
6990 convenience. */
07b3255c
TT
6991
6992static const char *
6993replace_non_ascii (char *dest, const char *name)
6994{
862180a2 6995 const char *result = dest;
07b3255c
TT
6996 while (*name != '\0')
6997 {
6998 if (!ISPRINT (*name))
6999 *dest++ = '?';
7000 else
7001 *dest++ = *name;
7002 ++name;
7003 }
862180a2
TT
7004 *dest = '\0';
7005 return result;
07b3255c
TT
7006}
7007
7f63b89b
TBA
7008const char *
7009linux_process_target::thread_name (ptid_t thread)
7010{
07b3255c
TT
7011 static char dest[100];
7012
7013 const char *name = linux_proc_tid_get_name (thread);
7014 if (name == nullptr)
7015 return nullptr;
7016
7017 /* Linux limits the comm file to 16 bytes (including the trailing
7018 \0. If the program or thread name is set when using a multi-byte
7019 encoding, this might cause it to be truncated mid-character. In
7020 this situation, sending the truncated form in an XML <thread>
7021 response will cause a parse error in gdb. So, instead convert
7022 from the locale's encoding (we can't be sure this is the correct
7023 encoding, but it's as good a guess as we have) to UTF-8, but in a
7024 way that ignores any encoding errors. See PR remote/30618. */
7025 const char *cset = nl_langinfo (CODESET);
7026 iconv_t handle = iconv_open ("UTF-8//IGNORE", cset);
7027 if (handle == (iconv_t) -1)
7028 return replace_non_ascii (dest, name);
7029
7030 size_t inbytes = strlen (name);
7031 char *inbuf = const_cast<char *> (name);
7032 size_t outbytes = sizeof (dest);
7033 char *outbuf = dest;
7034 size_t result = iconv (handle, &inbuf, &inbytes, &outbuf, &outbytes);
7035
7036 if (result == (size_t) -1)
7037 {
7038 if (errno == E2BIG)
7039 outbuf = &dest[sizeof (dest) - 1];
7040 else if ((errno == EILSEQ || errno == EINVAL)
7041 && outbuf < &dest[sizeof (dest) - 2])
7042 *outbuf++ = '?';
07b3255c 7043 }
862180a2 7044 *outbuf = '\0';
07b3255c
TT
7045
7046 iconv_close (handle);
7047 return *dest == '\0' ? nullptr : dest;
7f63b89b
TBA
7048}
#if USE_THREAD_DB
/* Delegate thread-handle lookup to the thread_db layer.  */

bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  return thread_db_thread_handle (ptid, handle, handle_len);
}
#endif
7058
7b961964
SM
7059thread_info *
7060linux_process_target::thread_pending_parent (thread_info *thread)
7061{
7062 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
7063
7064 if (parent == nullptr)
7065 return nullptr;
7066
7067 return get_lwp_thread (parent);
7068}
7069
df5ad102 7070thread_info *
faf44a31
PA
7071linux_process_target::thread_pending_child (thread_info *thread,
7072 target_waitkind *kind)
df5ad102 7073{
faf44a31 7074 lwp_info *child = get_thread_lwp (thread)->pending_child (kind);
df5ad102
SM
7075
7076 if (child == nullptr)
7077 return nullptr;
7078
7079 return get_lwp_thread (child);
7080}
7081
276d4552
YQ
7082/* Default implementation of linux_target_ops method "set_pc" for
7083 32-bit pc register which is literally named "pc". */
7084
7085void
7086linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7087{
7088 uint32_t newpc = pc;
7089
7090 supply_register_by_name (regcache, "pc", &newpc);
7091}
7092
7093/* Default implementation of linux_target_ops method "get_pc" for
7094 32-bit pc register which is literally named "pc". */
7095
7096CORE_ADDR
7097linux_get_pc_32bit (struct regcache *regcache)
7098{
7099 uint32_t pc;
7100
7101 collect_register_by_name (regcache, "pc", &pc);
c058728c 7102 threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
276d4552
YQ
7103 return pc;
7104}
7105
6f69e520
YQ
7106/* Default implementation of linux_target_ops method "set_pc" for
7107 64-bit pc register which is literally named "pc". */
7108
7109void
7110linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7111{
7112 uint64_t newpc = pc;
7113
7114 supply_register_by_name (regcache, "pc", &newpc);
7115}
7116
7117/* Default implementation of linux_target_ops method "get_pc" for
7118 64-bit pc register which is literally named "pc". */
7119
7120CORE_ADDR
7121linux_get_pc_64bit (struct regcache *regcache)
7122{
7123 uint64_t pc;
7124
7125 collect_register_by_name (regcache, "pc", &pc);
c058728c 7126 threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
6f69e520
YQ
7127 return pc;
7128}
7129
0570503d 7130/* See linux-low.h. */
974c89e0 7131
0570503d 7132int
43e5fbd8 7133linux_get_auxv (int pid, int wordsize, CORE_ADDR match, CORE_ADDR *valp)
974c89e0
AH
7134{
7135 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7136 int offset = 0;
7137
7138 gdb_assert (wordsize == 4 || wordsize == 8);
7139
43e5fbd8
TJB
7140 while (the_target->read_auxv (pid, offset, data, 2 * wordsize)
7141 == 2 * wordsize)
974c89e0
AH
7142 {
7143 if (wordsize == 4)
7144 {
0570503d 7145 uint32_t *data_p = (uint32_t *) data;
974c89e0 7146 if (data_p[0] == match)
0570503d
PFC
7147 {
7148 *valp = data_p[1];
7149 return 1;
7150 }
974c89e0
AH
7151 }
7152 else
7153 {
0570503d 7154 uint64_t *data_p = (uint64_t *) data;
974c89e0 7155 if (data_p[0] == match)
0570503d
PFC
7156 {
7157 *valp = data_p[1];
7158 return 1;
7159 }
974c89e0
AH
7160 }
7161
7162 offset += 2 * wordsize;
7163 }
7164
7165 return 0;
7166}
7167
7168/* See linux-low.h. */
7169
7170CORE_ADDR
43e5fbd8 7171linux_get_hwcap (int pid, int wordsize)
974c89e0 7172{
0570503d 7173 CORE_ADDR hwcap = 0;
43e5fbd8 7174 linux_get_auxv (pid, wordsize, AT_HWCAP, &hwcap);
0570503d 7175 return hwcap;
974c89e0
AH
7176}
7177
7178/* See linux-low.h. */
7179
7180CORE_ADDR
43e5fbd8 7181linux_get_hwcap2 (int pid, int wordsize)
974c89e0 7182{
0570503d 7183 CORE_ADDR hwcap2 = 0;
43e5fbd8 7184 linux_get_auxv (pid, wordsize, AT_HWCAP2, &hwcap2);
0570503d 7185 return hwcap2;
974c89e0 7186}
#ifdef HAVE_LINUX_REGSETS
/* Count the regsets in INFO's array, which is terminated by an entry
   with a negative size, and cache the count in INFO->num_regsets.  */

void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  while (info->regsets[count].size >= 0)
    count++;

  info->num_regsets = count;
}
#endif
7198
da6d8c04
DJ
7199void
7200initialize_low (void)
7201{
bd99dc85 7202 struct sigaction sigchld_action;
dd373349 7203
bd99dc85 7204 memset (&sigchld_action, 0, sizeof (sigchld_action));
ef0478f6 7205 set_target_ops (the_linux_target);
dd373349 7206
aa7c7447 7207 linux_ptrace_init_warnings ();
1b919490 7208 linux_proc_init_warnings ();
bd99dc85
PA
7209
7210 sigchld_action.sa_handler = sigchld_handler;
7211 sigemptyset (&sigchld_action.sa_mask);
7212 sigchld_action.sa_flags = SA_RESTART;
7213 sigaction (SIGCHLD, &sigchld_action, NULL);
3aee8918
PA
7214
7215 initialize_low_arch ();
89245bc0
DB
7216
7217 linux_check_ptrace_features ();
da6d8c04 7218}