]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdbserver/linux-low.cc
gdbserver: Reorganize linux_process_target::filter_event
[thirdparty/binutils-gdb.git] / gdbserver / linux-low.cc
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
4a94e368 2 Copyright (C) 1995-2022 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
268a13a5 22#include "gdbsupport/agent.h"
de0d863e 23#include "tdesc.h"
cdc8e9b2
JB
24#include "gdbsupport/event-loop.h"
25#include "gdbsupport/event-pipe.h"
268a13a5
TT
26#include "gdbsupport/rsp-low.h"
27#include "gdbsupport/signals-state-save-restore.h"
96d7229d
LM
28#include "nat/linux-nat.h"
29#include "nat/linux-waitpid.h"
268a13a5 30#include "gdbsupport/gdb_wait.h"
5826e159 31#include "nat/gdb_ptrace.h"
125f8a3d
GB
32#include "nat/linux-ptrace.h"
33#include "nat/linux-procfs.h"
8cc73a39 34#include "nat/linux-personality.h"
da6d8c04
DJ
35#include <signal.h>
36#include <sys/ioctl.h>
37#include <fcntl.h>
0a30fbc4 38#include <unistd.h>
fd500816 39#include <sys/syscall.h>
f9387fc3 40#include <sched.h>
07e059b5
VP
41#include <ctype.h>
42#include <pwd.h>
43#include <sys/types.h>
44#include <dirent.h>
53ce3c39 45#include <sys/stat.h>
efcbbd14 46#include <sys/vfs.h>
1570b33e 47#include <sys/uio.h>
268a13a5 48#include "gdbsupport/filestuff.h"
c144c7a0 49#include "tracepoint.h"
276d4552 50#include <inttypes.h>
268a13a5 51#include "gdbsupport/common-inferior.h"
2090129c 52#include "nat/fork-inferior.h"
268a13a5 53#include "gdbsupport/environ.h"
21987b9c 54#include "gdbsupport/gdb-sigmask.h"
268a13a5 55#include "gdbsupport/scoped_restore.h"
957f3f49
DE
56#ifndef ELFMAG0
57/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
58 then ELFMAG0 will have been defined. If it didn't get included by
59 gdb_proc_service.h then including it will likely introduce a duplicate
60 definition of elf_fpregset_t. */
61#include <elf.h>
62#endif
14d2069a 63#include "nat/linux-namespaces.h"
efcbbd14 64
fd462a61
DJ
65#ifndef O_LARGEFILE
66#define O_LARGEFILE 0
67#endif
1a981360 68
69f4c9cc
AH
69#ifndef AT_HWCAP2
70#define AT_HWCAP2 26
71#endif
72
db0dfaa0
LM
73/* Some targets did not define these ptrace constants from the start,
74 so gdbserver defines them locally here. In the future, these may
75 be removed after they are added to asm/ptrace.h. */
76#if !(defined(PT_TEXT_ADDR) \
77 || defined(PT_DATA_ADDR) \
78 || defined(PT_TEXT_END_ADDR))
79#if defined(__mcoldfire__)
80/* These are still undefined in 3.10 kernels. */
81#define PT_TEXT_ADDR 49*4
82#define PT_DATA_ADDR 50*4
83#define PT_TEXT_END_ADDR 51*4
db0dfaa0
LM
84/* These are still undefined in 3.10 kernels. */
85#elif defined(__TMS320C6X__)
86#define PT_TEXT_ADDR (0x10000*4)
87#define PT_DATA_ADDR (0x10004*4)
88#define PT_TEXT_END_ADDR (0x10008*4)
89#endif
90#endif
91
5203ae1e
TBA
92#if (defined(__UCLIBC__) \
93 && defined(HAS_NOMMU) \
94 && defined(PT_TEXT_ADDR) \
95 && defined(PT_DATA_ADDR) \
96 && defined(PT_TEXT_END_ADDR))
97#define SUPPORTS_READ_OFFSETS
98#endif
99
9accd112 100#ifdef HAVE_LINUX_BTRACE
125f8a3d 101# include "nat/linux-btrace.h"
268a13a5 102# include "gdbsupport/btrace-common.h"
9accd112
MM
103#endif
104
8365dcf5
TJB
105#ifndef HAVE_ELF32_AUXV_T
106/* Copied from glibc's elf.h. */
107typedef struct
108{
109 uint32_t a_type; /* Entry type */
110 union
111 {
112 uint32_t a_val; /* Integer value */
113 /* We use to have pointer elements added here. We cannot do that,
114 though, since it does not work when using 32-bit definitions
115 on 64-bit platforms and vice versa. */
116 } a_un;
117} Elf32_auxv_t;
118#endif
119
120#ifndef HAVE_ELF64_AUXV_T
121/* Copied from glibc's elf.h. */
122typedef struct
123{
124 uint64_t a_type; /* Entry type */
125 union
126 {
127 uint64_t a_val; /* Integer value */
128 /* We use to have pointer elements added here. We cannot do that,
129 though, since it does not work when using 32-bit definitions
130 on 64-bit platforms and vice versa. */
131 } a_un;
132} Elf64_auxv_t;
133#endif
134
ded48a5e
YQ
135/* Does the current host support PTRACE_GETREGSET? */
136int have_ptrace_getregset = -1;
137
cff068da
GB
138/* LWP accessors. */
139
140/* See nat/linux-nat.h. */
141
142ptid_t
143ptid_of_lwp (struct lwp_info *lwp)
144{
145 return ptid_of (get_lwp_thread (lwp));
146}
147
148/* See nat/linux-nat.h. */
149
4b134ca1
GB
/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  /* Store the architecture-private data pointer on the LWP; ownership
     handling is up to the architecture code.  */
  lwp->arch_private = info;
}
156
157/* See nat/linux-nat.h. */
158
/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  /* May be NULL if the architecture keeps no per-LWP data.  */
  return lwp->arch_private;
}
164
165/* See nat/linux-nat.h. */
166
cff068da
GB
/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  /* Non-zero when the LWP is known to be in a (ptrace) stop.  */
  return lwp->stopped;
}
172
173/* See nat/linux-nat.h. */
174
/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  /* Why the LWP last stopped (breakpoint, watchpoint, etc.).  */
  return lwp->stop_reason;
}
180
0e00e962
AA
181/* See nat/linux-nat.h. */
182
/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  /* Non-zero while the LWP is being single-stepped.  */
  return lwp->stepping;
}
188
05044653
PA
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the pending (pid, status) list; NULL when empty.  */
static struct simple_pid_list *stopped_pids;
05044653
PA
205
206/* Trivial list manipulation functions to keep track of a list of new
207 stopped processes. */
208
209static void
210add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
211{
8d749320 212 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
213
214 new_pid->pid = pid;
215 new_pid->status = status;
216 new_pid->next = *listp;
217 *listp = new_pid;
218}
219
220static int
221pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
222{
223 struct simple_pid_list **p;
224
225 for (p = listp; *p != NULL; p = &(*p)->next)
226 if ((*p)->pid == pid)
227 {
228 struct simple_pid_list *next = (*p)->next;
229
230 *statusp = (*p)->status;
231 xfree (*p);
232 *p = next;
233 return 1;
234 }
235 return 0;
236}
24a09b5f 237
bde24c0a
PA
/* The possible states of the stop-all-threads machinery.  */

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
252
253/* FIXME make into a target method? */
24a09b5f 254int using_threads = 1;
24a09b5f 255
fa593d66
PA
256/* True if we're presently stabilizing threads (moving them out of
257 jump pads). */
258static int stabilizing_threads;
259
f50bf8e5 260static void unsuspend_all_lwps (struct lwp_info *except);
95954743 261static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
00db26fa 262static int lwp_is_marked_dead (struct lwp_info *lwp);
d50171e4 263static int kill_lwp (unsigned long lwpid, int signo);
863d01bd 264static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
ece66d65 265static int linux_low_ptrace_options (int attached);
ced2dffb 266static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
d50171e4 267
582511be
PA
268/* When the event-loop is doing a step-over, this points at the thread
269 being stepped. */
6bd434d6 270static ptid_t step_over_bkpt;
582511be 271
bf9ae9d8
TBA
bool
linux_process_target::low_supports_breakpoints ()
{
  /* Base implementation: no breakpoint support.  Architectures that
     implement low_get_pc/low_set_pc override this to return true.  */
  return false;
}
d50171e4 277
bf9ae9d8
TBA
CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  /* Placeholder; only meaningful when low_supports_breakpoints
     returns true, in which case the architecture overrides this.  */
  return 0;
}
283
void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  /* Must be overridden by any architecture that claims breakpoint
     support.  */
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}
0d62e5e8 289
7582c77c
TBA
std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  /* Only targets implementing software single-step provide this.  */
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
			  "implemented");
}
296
d4807ea2
TBA
int
linux_process_target::low_decr_pc_after_break ()
{
  /* By default the reported PC needs no adjustment after a software
     breakpoint trap; architectures where the trap advances the PC
     override this with the breakpoint length.  */
  return 0;
}
302
c2d6af84
PA
303/* True if LWP is stopped in its stepping range. */
304
305static int
306lwp_in_step_range (struct lwp_info *lwp)
307{
308 CORE_ADDR pc = lwp->stop_pc;
309
310 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
311}
312
cdc8e9b2
JB
313/* The event pipe registered as a waitable file in the event loop. */
314static event_pipe linux_event_pipe;
bd99dc85
PA
315
316/* True if we're currently in async mode. */
cdc8e9b2 317#define target_is_async_p() (linux_event_pipe.is_open ())
bd99dc85 318
02fc4de7 319static void send_sigstop (struct lwp_info *lwp);
bd99dc85 320
d0722149
DE
321/* Return non-zero if HEADER is a 64-bit ELF file. */
322
/* Inspect HEADER and store its ELF machine number in *MACHINE.
   Return 1 if the header describes a 64-bit ELF file, 0 if it is a
   32-bit ELF file, and -1 if the magic bytes are not ELF at all (in
   which case *MACHINE is set to EM_NONE).  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  const unsigned char *ident = header->e_ident;

  if (ident[EI_MAG0] != ELFMAG0
      || ident[EI_MAG1] != ELFMAG1
      || ident[EI_MAG2] != ELFMAG2
      || ident[EI_MAG3] != ELFMAG3)
    {
      /* Not an ELF object.  */
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return ident[EI_CLASS] == ELFCLASS64;
}
338
339/* Return non-zero if FILE is a 64-bit ELF file,
340 zero if the file is not a 64-bit ELF file,
341 and -1 if the file is not accessible or doesn't exist. */
342
be07f1a2 343static int
214d508e 344elf_64_file_p (const char *file, unsigned int *machine)
d0722149 345{
957f3f49 346 Elf64_Ehdr header;
d0722149
DE
347 int fd;
348
349 fd = open (file, O_RDONLY);
350 if (fd < 0)
351 return -1;
352
353 if (read (fd, &header, sizeof (header)) != sizeof (header))
354 {
355 close (fd);
356 return 0;
357 }
358 close (fd);
359
214d508e 360 return elf_64_header_p (&header, machine);
d0722149
DE
361}
362
be07f1a2
PA
363/* Accepts an integer PID; Returns true if the executable PID is
364 running is a 64-bit ELF file.. */
365
366int
214d508e 367linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 368{
d8d2a3ee 369 char file[PATH_MAX];
be07f1a2
PA
370
371 sprintf (file, "/proc/%d/exe", pid);
214d508e 372 return elf_64_file_p (file, machine);
be07f1a2
PA
373}
374
fd000fb3
TBA
375void
376linux_process_target::delete_lwp (lwp_info *lwp)
bd99dc85 377{
fa96cb38
PA
378 struct thread_info *thr = get_lwp_thread (lwp);
379
c058728c 380 threads_debug_printf ("deleting %ld", lwpid_of (thr));
fa96cb38
PA
381
382 remove_thread (thr);
466eecee 383
fd000fb3 384 low_delete_thread (lwp->arch_private);
466eecee 385
013e3554 386 delete lwp;
bd99dc85
PA
387}
388
fd000fb3
TBA
void
linux_process_target::low_delete_thread (arch_lwp_info *info)
{
  /* Default implementation should be overridden if architecture-specific
     info is being used.  */
  gdb_assert (info == nullptr);
}
95954743 396
fd000fb3
TBA
397process_info *
398linux_process_target::add_linux_process (int pid, int attached)
95954743
PA
399{
400 struct process_info *proc;
401
95954743 402 proc = add_process (pid, attached);
8d749320 403 proc->priv = XCNEW (struct process_info_private);
95954743 404
fd000fb3 405 proc->priv->arch_private = low_new_process ();
aa5ca48f 406
95954743
PA
407 return proc;
408}
409
fd000fb3
TBA
arch_process_info *
linux_process_target::low_new_process ()
{
  /* No architecture-specific per-process data by default.  */
  return nullptr;
}
415
void
linux_process_target::low_delete_process (arch_process_info *info)
{
  /* Default implementation must be overridden if architecture-specific
     info exists.  */
  gdb_assert (info == nullptr);
}
423
void
linux_process_target::low_new_fork (process_info *parent, process_info *child)
{
  /* Nop by default; architectures with per-process state to copy
     from PARENT to CHILD override this.  */
}
429
797bcff5
TBA
/* Run low_arch_setup with THREAD as the current thread; the
   previously-current thread is restored when this returns.  */

void
linux_process_target::arch_setup_thread (thread_info *thread)
{
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  low_arch_setup ();
}
438
d16f3f6c
TBA
/* Handle a ptrace extended event (fork, vfork, clone, vfork-done or
   exec) reported for *ORIG_EVENT_LWP with raw wait status WSTAT.
   Return 0 if the event should be reported to the client, 1 if it was
   consumed internally and must be suppressed.  On exec,
   *ORIG_EVENT_LWP is replaced with the LWP of the fresh post-exec
   process.  */

int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
					    int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;

	  /* fork/vfork creates a whole new process, so the new ptid is
	     (new_pid, new_pid).  */
	  ptid = ptid_t (new_pid, new_pid);

	  threads_debug_printf ("Got fork event from LWP %ld, "
				"new child is %d",
				ptid_of (event_thr).lwp (),
				ptid.pid ());

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = add_linux_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.set_stopped (GDB_SIGNAL_0);

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      threads_debug_printf ("leaving child suspended");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, child will
		 hit it, so uninsert single-step breakpoints from parent
		 (and child).  Once vfork child is done, reinsert
		 them back to parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  target_desc_up tdesc = allocate_target_description ();
	  copy_target_description (tdesc.get (), parent_proc->tdesc);
	  child_proc->tdesc = tdesc.release ();

	  /* Clone arch-specific process data.  */
	  low_new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.set_forked (ptid);
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.set_vforked (ptid);

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent event is passed on to
	     higher layers.  */
	  event_lwp->fork_relative = child_lwp;
	  child_lwp->fork_relative = event_lwp;

	  /* If the parent thread is doing step-over with single-step
	     breakpoints, the list of single-step breakpoints are cloned
	     from the parent's.  Remove them from the child process.
	     In case of vfork, we'll reinsert them back once vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      /* PTRACE_EVENT_CLONE: a new thread in the same process.  */
      threads_debug_printf
	("Got clone event from LWP %ld, new child is LWP %ld",
	 lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (cs.report_thread_events)
	{
	  new_lwp->waitstatus.set_thread_created ();
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.set_vfork_done ();

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
	{
	  /* Reinsert the single-step breakpoints that were removed
	     from the parent while the vfork child was borrowing its
	     address space.  */
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      threads_debug_printf ("Got exec event from LWP %ld",
			    lwpid_of (event_thr));

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      switch_to_thread (nullptr);

      /* Create a new process/lwp/thread.  */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.set_execd
	(make_unique_xstrdup
	 (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.set_ignore ();

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
699
df95181f
TBA
700CORE_ADDR
701linux_process_target::get_pc (lwp_info *lwp)
d50171e4 702{
d50171e4
PA
703 struct regcache *regcache;
704 CORE_ADDR pc;
705
bf9ae9d8 706 if (!low_supports_breakpoints ())
d50171e4
PA
707 return 0;
708
24583e45
TBA
709 scoped_restore_current_thread restore_thread;
710 switch_to_thread (get_lwp_thread (lwp));
d50171e4 711
0bfdf32f 712 regcache = get_thread_regcache (current_thread, 1);
bf9ae9d8 713 pc = low_get_pc (regcache);
d50171e4 714
c058728c 715 threads_debug_printf ("pc is 0x%lx", (long) pc);
d50171e4 716
d50171e4
PA
717 return pc;
718}
719
9eedd27d
TBA
720void
721linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
82075af2 722{
82075af2
JS
723 struct regcache *regcache;
724
24583e45
TBA
725 scoped_restore_current_thread restore_thread;
726 switch_to_thread (get_lwp_thread (lwp));
82075af2
JS
727
728 regcache = get_thread_regcache (current_thread, 1);
9eedd27d 729 low_get_syscall_trapinfo (regcache, sysno);
82075af2 730
c058728c 731 threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
82075af2
JS
732}
733
9eedd27d
TBA
void
linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* By default, report an unknown system call number.  */
  *sysno = UNKNOWN_SYSCALL;
}
740
df95181f
TBA
/* Determine why LWP stopped and record it in lwp->stop_reason and the
   (possibly adjusted) stop pc in lwp->stop_pc.  Returns false when
   the target has no breakpoint support, true otherwise.  */

bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  pc = get_pc (lwp);
  /* Where the breakpoint instruction would be, on archs whose trap
     leaves the PC past the breakpoint.  */
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      threads_debug_printf
	("%s stopped by software breakpoint",
	 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    threads_debug_printf
      ("%s stopped by hardware breakpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    threads_debug_printf
      ("%s stopped by hardware watchpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    threads_debug_printf
      ("%s stopped by trace",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

  lwp->stop_pc = pc;
  return true;
}
ce3a066d 851
fd000fb3
TBA
852lwp_info *
853linux_process_target::add_lwp (ptid_t ptid)
611cb4a5 854{
c360a473 855 lwp_info *lwp = new lwp_info;
0d62e5e8 856
754e3168
AH
857 lwp->thread = add_thread (ptid, lwp);
858
fd000fb3 859 low_new_thread (lwp);
aa5ca48f 860
54a0b537 861 return lwp;
0d62e5e8 862}
611cb4a5 863
fd000fb3
TBA
void
linux_process_target::low_new_thread (lwp_info *info)
{
  /* Nop by default; architectures with per-thread state override.  */
}
869
2090129c
SDJ
/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  Runs in the
   forked child before it execs the program.  */

static void
linux_ptrace_fun ()
{
  /* Ask to be traced by our parent (gdbserver).  */
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  /* Put the inferior in its own process group.  */
  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      /* open() takes the lowest free descriptor, which is fd 0
	 (stdin) right after the close above.  */
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
901
da6d8c04 902/* Start an inferior process and returns its pid.
2090129c
SDJ
903 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
904 are its arguments. */
da6d8c04 905
15295543
TBA
906int
907linux_process_target::create_inferior (const char *program,
908 const std::vector<char *> &program_args)
da6d8c04 909{
c12a5089 910 client_state &cs = get_client_state ();
a6dbe5df 911 struct lwp_info *new_lwp;
da6d8c04 912 int pid;
95954743 913 ptid_t ptid;
03583c20 914
41272101
TT
915 {
916 maybe_disable_address_space_randomization restore_personality
c12a5089 917 (cs.disable_randomization);
bea571eb 918 std::string str_program_args = construct_inferior_arguments (program_args);
41272101
TT
919
920 pid = fork_inferior (program,
921 str_program_args.c_str (),
922 get_environ ()->envp (), linux_ptrace_fun,
923 NULL, NULL, NULL, NULL);
924 }
03583c20 925
fd000fb3 926 add_linux_process (pid, 0);
95954743 927
184ea2f7 928 ptid = ptid_t (pid, pid);
95954743 929 new_lwp = add_lwp (ptid);
a6dbe5df 930 new_lwp->must_set_ptrace_flags = 1;
611cb4a5 931
2090129c
SDJ
932 post_fork_inferior (pid, program);
933
a9fa9f7d 934 return pid;
da6d8c04
DJ
935}
936
ece66d65
JS
937/* Implement the post_create_inferior target_ops method. */
938
6dee9afb
TBA
/* Implement the post_create_inferior target_ops method.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  /* Let the architecture determine register layout etc. now that the
     inferior exists.  */
  low_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      /* Enable the extended ptrace event reporting deferred at
	 create/attach time.  */
      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}
955
/* Attach to the lwp identified by PTID with PTRACE_ATTACH and record
   it in the lwp list.  Returns 0 on success, or the errno value from
   the failed PTRACE_ATTACH otherwise.  */

int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      threads_debug_printf ("Attached to a stopped process");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
1036
/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      threads_debug_printf ("Found new lwp %d", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	threads_debug_printf
	  ("Cannot attach to lwp %d: thread is gone (%d: %s)",
	   lwpid, err, safe_strerror (err));
      else if (err != 0)
	{
	  /* Any other failure is reported, but does not abort the
	     walk over the remaining threads.  */
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      return 1;
    }
  return 0;
}
1075
500c1d85
PA
1076static void async_file_mark (void);
1077
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  Returns 0 on success; on failure the partially
   created process is removed and an error is thrown.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid);
  int err;

  /* Second argument 1: this process is being attached to, not
     spawned.  */
  proc = add_linux_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      /* If the first stop wasn't the expected SIGSTOP, keep the event
	 pending so it is reported to GDB later.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
1150
95954743 1151static int
e4eb0dec 1152last_thread_of_process_p (int pid)
95954743 1153{
e4eb0dec 1154 bool seen_one = false;
95954743 1155
da4ae14a 1156 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
95954743 1157 {
e4eb0dec
SM
1158 if (!seen_one)
1159 {
1160 /* This is the first thread of this process we see. */
1161 seen_one = true;
1162 return false;
1163 }
1164 else
1165 {
1166 /* This is the second thread of this process we see. */
1167 return true;
1168 }
1169 });
da6d8c04 1170
e4eb0dec 1171 return thread == NULL;
95954743
PA
1172}
1173
/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      /* Save errno before the debug call can clobber it.  */
      int save_errno = errno;

      threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }
}
1217
/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1259
578290ec 1260/* Callback for `for_each_thread'. Kills an lwp of a given process,
da84f473 1261 except the leader. */
95954743 1262
578290ec
SM
1263static void
1264kill_one_lwp_callback (thread_info *thread, int pid)
da6d8c04 1265{
54a0b537 1266 struct lwp_info *lwp = get_thread_lwp (thread);
0d62e5e8 1267
fd500816
DJ
1268 /* We avoid killing the first thread here, because of a Linux kernel (at
1269 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1270 the children get a chance to be reaped, it will remain a zombie
1271 forever. */
95954743 1272
d86d4aaf 1273 if (lwpid_of (thread) == pid)
95954743 1274 {
c058728c
SM
1275 threads_debug_printf ("is last of process %s",
1276 target_pid_to_str (thread->id).c_str ());
578290ec 1277 return;
95954743 1278 }
fd500816 1279
e76126e8 1280 kill_wait_lwp (lwp);
da6d8c04
DJ
1281}
1282
/* Kill process PROCESS and all of its lwps, then mourn it.
   Always returns 0.  */

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    threads_debug_printf ("cannot find lwp for pid: %d", pid);
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1313
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   the host signal number to re-deliver, or 0 if none should be.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
	  || thread->last_status.sig () == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      threads_debug_printf ("lwp %s had stopped with extended "
			    "status: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  /* Respect GDB's pass/nopass signal settings, if it told us any.  */
  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      threads_debug_printf ("lwp %s had signal %s, "
			    "but we don't know if we should pass it.  "
			    "Default to not.",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      threads_debug_printf ("lwp %s has pending signal %s: delivering it",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
1389
/* Detach from a single LWP.  Discards any SIGSTOP gdbserver itself
   requested (via SIGCONT), and re-delivers with PTRACE_DETACH any
   signal the thread originally stopped for so it isn't lost.  */

void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      threads_debug_printf ("Sending SIGCONT to %s",
			    target_pid_to_str (ptid_of (thread)).c_str ());

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)).c_str (),
		 safe_strerror (save_errno));
	}
    }
  else
    threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
			  target_pid_to_str (ptid_of (thread)).c_str (),
			  strsignal (sig));

  delete_lwp (lwp);
}
1468
/* Detach from process PROCESS and all of its lwps.  Clone lwps are
   detached first, then the thread-group leader.  Always returns 0.  */

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1517
1518/* Remove all LWPs that belong to process PROC from the lwp list. */
1519
8adb37b9
TBA
1520void
1521linux_process_target::mourn (process_info *process)
8336d594
PA
1522{
1523 struct process_info_private *priv;
1524
1525#ifdef USE_THREAD_DB
1526 thread_db_mourn (process);
1527#endif
1528
fd000fb3 1529 for_each_thread (process->pid, [this] (thread_info *thread)
6b2a85da
SM
1530 {
1531 delete_lwp (get_thread_lwp (thread));
1532 });
f9e39928 1533
8336d594 1534 /* Freeing all private data. */
fe978cb0 1535 priv = process->priv;
fd000fb3 1536 low_delete_process (priv->arch_private);
8336d594 1537 free (priv);
fe978cb0 1538 process->priv = NULL;
505106cd
PA
1539
1540 remove_process (process);
8336d594
PA
1541}
1542
95a49a39
TBA
1543void
1544linux_process_target::join (int pid)
444d6139 1545{
444d6139
PA
1546 int status, ret;
1547
1548 do {
d105de22 1549 ret = my_waitpid (pid, &status, 0);
444d6139
PA
1550 if (WIFEXITED (status) || WIFSIGNALED (status))
1551 break;
1552 } while (ret != -1 || errno != ECHILD);
1553}
1554
13d3d99b
TBA
1555/* Return true if the given thread is still alive. */
1556
1557bool
1558linux_process_target::thread_alive (ptid_t ptid)
0d62e5e8 1559{
95954743
PA
1560 struct lwp_info *lwp = find_lwp_pid (ptid);
1561
1562 /* We assume we always know if a thread exits. If a whole process
1563 exited but we still haven't been able to report it to GDB, we'll
1564 hold on to the last lwp of the dead process. */
1565 if (lwp != NULL)
00db26fa 1566 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1567 else
1568 return 0;
1569}
1570
/* Return true if THREAD still has an interesting pending status to
   report.  A pending breakpoint stop is discarded (and false
   returned) if the thread's PC has moved since, or (without
   SIGTRAP siginfo support) if the breakpoint it hit is gone.  */

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      /* Breakpoint lookups below operate on the current thread.  */
      scoped_restore_current_thread restore_thread;
      switch_to_thread (thread);

      if (pc != lp->stop_pc)
	{
	  threads_debug_printf ("PC of %ld changed",
				lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !low_breakpoint_at (pc))
	{
	  threads_debug_printf ("previous SW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  threads_debug_printf ("previous HW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
#endif

      if (discard)
	{
	  threads_debug_printf ("discarding pending breakpoint status");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1627
a681f9c9
PA
1628/* Returns true if LWP is resumed from the client's perspective. */
1629
1630static int
1631lwp_resumed (struct lwp_info *lwp)
1632{
1633 struct thread_info *thread = get_lwp_thread (lwp);
1634
1635 if (thread->last_resume_kind != resume_stop)
1636 return 1;
1637
1638 /* Did gdb send us a `vCont;t', but we haven't reported the
1639 corresponding stop to gdb yet? If so, the thread is still
1640 resumed/running from gdb's perspective. */
1641 if (thread->last_resume_kind == resume_stop
183be222 1642 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
a681f9c9
PA
1643 return 1;
1644
1645 return 0;
1646}
1647
/* Predicate used when looking for a thread with a reportable event.
   Returns true if THREAD matches PTID and has a pending status that
   is still valid; a stale pending breakpoint status is discarded and
   the lwp re-resumed.  */

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
						 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      /* The pending status went stale; silently resume the lwp the
	 same way it was last resumed.  */
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
1671
95954743
PA
1672struct lwp_info *
1673find_lwp_pid (ptid_t ptid)
1674{
da4ae14a 1675 thread_info *thread = find_thread ([&] (thread_info *thr_arg)
454296a2
SM
1676 {
1677 int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
da4ae14a 1678 return thr_arg->id.lwp () == lwp;
454296a2 1679 });
d86d4aaf
DE
1680
1681 if (thread == NULL)
1682 return NULL;
1683
9c80ecd6 1684 return get_thread_lwp (thread);
95954743
PA
1685}
1686
fa96cb38 1687/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1688
fa96cb38
PA
1689static int
1690num_lwps (int pid)
1691{
fa96cb38 1692 int count = 0;
0d62e5e8 1693
4d3bb80e
SM
1694 for_each_thread (pid, [&] (thread_info *thread)
1695 {
9c80ecd6 1696 count++;
4d3bb80e 1697 });
3aee8918 1698
fa96cb38
PA
1699 return count;
1700}
d61ddec4 1701
6d4ee8c6
GB
1702/* See nat/linux-nat.h. */
1703
1704struct lwp_info *
1705iterate_over_lwps (ptid_t filter,
d3a70e03 1706 gdb::function_view<iterate_over_lwps_ftype> callback)
6d4ee8c6 1707{
da4ae14a 1708 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
6d1e5673 1709 {
da4ae14a 1710 lwp_info *lwp = get_thread_lwp (thr_arg);
6d1e5673 1711
d3a70e03 1712 return callback (lwp);
6d1e5673 1713 });
6d4ee8c6 1714
9c80ecd6 1715 if (thread == NULL)
6d4ee8c6
GB
1716 return NULL;
1717
9c80ecd6 1718 return get_thread_lwp (thread);
6d4ee8c6
GB
1719}
1720
/* Delete the lwp of any thread-group leader that has become a zombie
   while other threads of its group are still alive (either the
   leader exited on its own, or another thread exec'd).  */

void
linux_process_target::check_zombie_leaders ()
{
  for_each_process ([this] (process_info *proc) {
    pid_t leader_pid = pid_of (proc);
    struct lwp_info *leader_lp;

    leader_lp = find_lwp_pid (ptid_t (leader_pid));

    threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
			  "num_lwps=%d, zombie=%d",
			  leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
			  linux_proc_pid_is_zombie (leader_pid));

    if (leader_lp != NULL && !leader_lp->stopped
	/* Check if there are other threads in the group, as we may
	   have raced with the inferior simply exiting.  */
	&& !last_thread_of_process_p (leader_pid)
	&& linux_proc_pid_is_zombie (leader_pid))
      {
	/* A leader zombie can mean one of two things:

	   - It exited, and there's an exit status pending
	     available, or only the leader exited (not the whole
	     program).  In the latter case, we can't waitpid the
	     leader's exit status until all other threads are gone.

	   - There are 3 or more threads in the group, and a thread
	     other than the leader exec'd.  On an exec, the Linux
	     kernel destroys all other threads (except the execing
	     one) in the thread group, and resets the execing thread's
	     tid to the tgid.  No exit notification is sent for the
	     execing thread -- from the ptracer's perspective, it
	     appears as though the execing thread just vanishes.
	     Until we reap all other threads except the leader and the
	     execing thread, the leader will be zombie, and the
	     execing thread will be in `D (disc sleep)'.  As soon as
	     all other threads are reaped, the execing thread changes
	     it's tid to the tgid, and the previous (zombie) leader
	     vanishes, giving place to the "new" leader.  We could try
	     distinguishing the exit and exec cases, by waiting once
	     more, and seeing if something comes out, but it doesn't
	     sound useful.  The previous leader _does_ go away, and
	     we'll re-add the new one once we see the exec event
	     (which is just the same as what would happen if the
	     previous leader did exit voluntarily before some other
	     thread execs).  */

	threads_debug_printf ("Thread group leader %d zombie "
			      "(it exited, or another thread execd).",
			      leader_pid);

	delete_lwp (leader_lp);
      }
    });
}
c3adc08c 1777
a1385b7b
SM
1778/* Callback for `find_thread'. Returns the first LWP that is not
1779 stopped. */
d50171e4 1780
a1385b7b
SM
1781static bool
1782not_stopped_callback (thread_info *thread, ptid_t filter)
fa96cb38 1783{
a1385b7b
SM
1784 if (!thread->id.matches (filter))
1785 return false;
47c0c975 1786
a1385b7b 1787 lwp_info *lwp = get_thread_lwp (thread);
fa96cb38 1788
a1385b7b 1789 return !lwp->stopped;
0d62e5e8 1790}
611cb4a5 1791
863d01bd
PA
1792/* Increment LWP's suspend count. */
1793
1794static void
1795lwp_suspended_inc (struct lwp_info *lwp)
1796{
1797 lwp->suspended++;
1798
c058728c
SM
1799 if (lwp->suspended > 4)
1800 threads_debug_printf
1801 ("LWP %ld has a suspiciously high suspend count, suspended=%d",
1802 lwpid_of (get_lwp_thread (lwp)), lwp->suspended);
863d01bd
PA
1803}
1804
1805/* Decrement LWP's suspend count. */
1806
1807static void
1808lwp_suspended_decr (struct lwp_info *lwp)
1809{
1810 lwp->suspended--;
1811
1812 if (lwp->suspended < 0)
1813 {
1814 struct thread_info *thread = get_lwp_thread (lwp);
1815
1816 internal_error (__FILE__, __LINE__,
1817 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1818 lwp->suspended);
1819 }
1820}
1821
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  /* Balance the lwp_suspended_inc above.  */
  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads
	      || (lwp->collecting_fast_tracepoint
		  != fast_tpoint_collect_result::not_collecting));

  if (tpoint_related_event)
    {
      threads_debug_printf ("got a tracepoint event");
      return 1;
    }

  return 0;
}
1870
13e567af
TBA
1871fast_tpoint_collect_result
1872linux_process_target::linux_fast_tracepoint_collecting
1873 (lwp_info *lwp, fast_tpoint_collect_status *status)
fa593d66
PA
1874{
1875 CORE_ADDR thread_area;
d86d4aaf 1876 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66 1877
fa593d66
PA
1878 /* Get the thread area address. This is used to recognize which
1879 thread is which when tracing with the in-process agent library.
1880 We don't read anything from the address, and treat it as opaque;
1881 it's the address itself that we assume is unique per-thread. */
13e567af 1882 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
229d26fc 1883 return fast_tpoint_collect_result::not_collecting;
fa593d66
PA
1884
1885 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1886}
1887
13e567af
TBA
/* Default implementation of the low_get_thread_area hook: report
   failure.  The caller (linux_fast_tracepoint_collecting above)
   treats -1 as "cannot identify the thread; not collecting".
   Presumably overridden by archs that support fast tracepoints --
   TODO confirm against the arch-specific low targets.  */

int
linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  return -1;
}
1893
d16f3f6c
TBA
1894bool
1895linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
fa593d66 1896{
24583e45
TBA
1897 scoped_restore_current_thread restore_thread;
1898 switch_to_thread (get_lwp_thread (lwp));
fa593d66
PA
1899
1900 if ((wstat == NULL
1901 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1902 && supports_fast_tracepoints ()
58b4daa5 1903 && agent_loaded_p ())
fa593d66
PA
1904 {
1905 struct fast_tpoint_collect_status status;
fa593d66 1906
c058728c
SM
1907 threads_debug_printf
1908 ("Checking whether LWP %ld needs to move out of the jump pad.",
1909 lwpid_of (current_thread));
fa593d66 1910
229d26fc
SM
1911 fast_tpoint_collect_result r
1912 = linux_fast_tracepoint_collecting (lwp, &status);
fa593d66
PA
1913
1914 if (wstat == NULL
1915 || (WSTOPSIG (*wstat) != SIGILL
1916 && WSTOPSIG (*wstat) != SIGFPE
1917 && WSTOPSIG (*wstat) != SIGSEGV
1918 && WSTOPSIG (*wstat) != SIGBUS))
1919 {
1920 lwp->collecting_fast_tracepoint = r;
1921
229d26fc 1922 if (r != fast_tpoint_collect_result::not_collecting)
fa593d66 1923 {
229d26fc
SM
1924 if (r == fast_tpoint_collect_result::before_insn
1925 && lwp->exit_jump_pad_bkpt == NULL)
fa593d66
PA
1926 {
1927 /* Haven't executed the original instruction yet.
1928 Set breakpoint there, and wait till it's hit,
1929 then single-step until exiting the jump pad. */
1930 lwp->exit_jump_pad_bkpt
1931 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1932 }
1933
c058728c
SM
1934 threads_debug_printf
1935 ("Checking whether LWP %ld needs to move out of the jump pad..."
1936 " it does", lwpid_of (current_thread));
fa593d66 1937
d16f3f6c 1938 return true;
fa593d66
PA
1939 }
1940 }
1941 else
1942 {
1943 /* If we get a synchronous signal while collecting, *and*
1944 while executing the (relocated) original instruction,
1945 reset the PC to point at the tpoint address, before
1946 reporting to GDB. Otherwise, it's an IPA lib bug: just
1947 report the signal to GDB, and pray for the best. */
1948
229d26fc
SM
1949 lwp->collecting_fast_tracepoint
1950 = fast_tpoint_collect_result::not_collecting;
fa593d66 1951
229d26fc 1952 if (r != fast_tpoint_collect_result::not_collecting
fa593d66
PA
1953 && (status.adjusted_insn_addr <= lwp->stop_pc
1954 && lwp->stop_pc < status.adjusted_insn_addr_end))
1955 {
1956 siginfo_t info;
1957 struct regcache *regcache;
1958
1959 /* The si_addr on a few signals references the address
1960 of the faulting instruction. Adjust that as
1961 well. */
1962 if ((WSTOPSIG (*wstat) == SIGILL
1963 || WSTOPSIG (*wstat) == SIGFPE
1964 || WSTOPSIG (*wstat) == SIGBUS
1965 || WSTOPSIG (*wstat) == SIGSEGV)
0bfdf32f 1966 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 1967 (PTRACE_TYPE_ARG3) 0, &info) == 0
fa593d66
PA
1968 /* Final check just to make sure we don't clobber
1969 the siginfo of non-kernel-sent signals. */
1970 && (uintptr_t) info.si_addr == lwp->stop_pc)
1971 {
1972 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
0bfdf32f 1973 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
b8e1b30e 1974 (PTRACE_TYPE_ARG3) 0, &info);
fa593d66
PA
1975 }
1976
0bfdf32f 1977 regcache = get_thread_regcache (current_thread, 1);
bf9ae9d8 1978 low_set_pc (regcache, status.tpoint_addr);
fa593d66
PA
1979 lwp->stop_pc = status.tpoint_addr;
1980
1981 /* Cancel any fast tracepoint lock this thread was
1982 holding. */
1983 force_unlock_trace_buffer ();
1984 }
1985
1986 if (lwp->exit_jump_pad_bkpt != NULL)
1987 {
c058728c
SM
1988 threads_debug_printf
1989 ("Cancelling fast exit-jump-pad: removing bkpt."
1990 "stopping all threads momentarily.");
fa593d66
PA
1991
1992 stop_all_lwps (1, lwp);
fa593d66
PA
1993
1994 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1995 lwp->exit_jump_pad_bkpt = NULL;
1996
1997 unstop_all_lwps (1, lwp);
1998
1999 gdb_assert (lwp->suspended >= 0);
2000 }
2001 }
2002 }
2003
c058728c
SM
2004 threads_debug_printf
2005 ("Checking whether LWP %ld needs to move out of the jump pad... no",
2006 lwpid_of (current_thread));
0cccb683 2007
d16f3f6c 2008 return false;
fa593d66
PA
2009}
2010
/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  threads_debug_printf ("Deferring signal %d for LWP %ld.",
			WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      for (const auto &sig : lwp->pending_signals_to_report)
	threads_debug_printf (" Already queued %d", sig.signal);

      threads_debug_printf (" (no more currently queued signals)");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      for (const auto &sig : lwp->pending_signals_to_report)
	{
	  if (sig.signal == WSTOPSIG (*wstat))
	    {
	      threads_debug_printf
		("Not requeuing already queued non-RT signal %d for LWP %ld",
		 sig.signal, lwpid_of (thread));
	      return;
	    }
	}
    }

  /* Append the entry first; PTRACE_GETSIGINFO then fills its siginfo
     in place via the reference to back().  Keep this ordering.  */
  lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));

  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &lwp->pending_signals_to_report.back ().info);
}
2052
/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  Returns 1 and stores a stop code in *WSTAT if
   a deferred signal was dequeued; returns 0 if the queue is empty.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (!lwp->pending_signals_to_report.empty ())
    {
      const pending_signal &p_sig = lwp->pending_signals_to_report.front ();

      *wstat = W_STOPCODE (p_sig.signal);
      /* Only restore a siginfo that was actually captured when the
	 signal was deferred.  */
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals_to_report.pop_front ();

      threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
			    WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  for (const auto &sig : lwp->pending_signals_to_report)
	    threads_debug_printf (" Still queued %d", sig.signal);

	  threads_debug_printf (" (no more queued signals)");
	}

      return 1;
    }

  return 0;
}
2088
ac1bbaca
TBA
2089bool
2090linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
582511be 2091{
24583e45
TBA
2092 scoped_restore_current_thread restore_thread;
2093 switch_to_thread (get_lwp_thread (child));
d50171e4 2094
ac1bbaca
TBA
2095 if (low_stopped_by_watchpoint ())
2096 {
2097 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2098 child->stopped_data_address = low_stopped_data_address ();
2099 }
582511be 2100
ac1bbaca
TBA
2101 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2102}
d50171e4 2103
ac1bbaca
TBA
/* Default implementation: the low target provides no way to tell
   whether the inferior stopped for a watchpoint, so report "no".  */

bool
linux_process_target::low_stopped_by_watchpoint ()
{
  return false;
}
d50171e4 2109
ac1bbaca
TBA
/* Default implementation: no watchpoint support, so there is no
   stopped data address to report; return 0.  */

CORE_ADDR
linux_process_target::low_stopped_data_address ()
{
  return 0;
}
2115
de0d863e
DB
2116/* Return the ptrace options that we want to try to enable. */
2117
2118static int
2119linux_low_ptrace_options (int attached)
2120{
c12a5089 2121 client_state &cs = get_client_state ();
de0d863e
DB
2122 int options = 0;
2123
2124 if (!attached)
2125 options |= PTRACE_O_EXITKILL;
2126
c12a5089 2127 if (cs.report_fork_events)
de0d863e
DB
2128 options |= PTRACE_O_TRACEFORK;
2129
c12a5089 2130 if (cs.report_vfork_events)
c269dbdb
DB
2131 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2132
c12a5089 2133 if (cs.report_exec_events)
94585166
DB
2134 options |= PTRACE_O_TRACEEXEC;
2135
82075af2
JS
2136 options |= PTRACE_O_TRACESYSGOOD;
2137
de0d863e
DB
2138 return options;
2139}
2140
/* Process one raw wait status WSTAT, as returned by waitpid, for LWP
   LWPID.  Most events are left pending on the corresponding lwp_info
   (status_pending) for later selection; some are consumed here
   instead: events from LWPs not in our list, thread exits GDB does
   not want to hear about, extended ptrace events handled by
   handle_extended_wait, and delayed SIGSTOPs.  Nothing is reported
   to GDB directly from here.  */

void
linux_process_target::filter_event (int lwpid, int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (ptid_t (lwpid));

  /* Check for events reported by anything not in our LWP list.  */
  if (child == nullptr)
    {
      if (WIFSTOPPED (wstat))
	{
	  if (WSTOPSIG (wstat) == SIGTRAP
	      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
	    {
	      /* A non-leader thread exec'ed after we've seen the
		 leader zombie, and removed it from our lists (in
		 check_zombie_leaders).  The non-leader thread changes
		 its tid to the tgid.  */
	      threads_debug_printf
		("Re-adding thread group leader LWP %d after exec.",
		 lwpid);

	      child = add_lwp (ptid_t (lwpid, lwpid));
	      child->stopped = 1;
	      switch_to_thread (child->thread);
	    }
	  else
	    {
	      /* A process we are controlling has forked and the new
		 child's stop was reported to us by the kernel.  Save
		 its PID and go back to waiting for the fork event to
		 be reported - the stopped process might be returned
		 from waitpid before or after the fork event is.  */
	      threads_debug_printf
		("Saving LWP %d status %s in stopped_pids list",
		 lwpid, status_to_str (wstat).c_str ());
	      add_to_pid_list (&stopped_pids, lwpid, wstat);
	    }
	}
      else
	{
	  /* Don't report an event for the exit of an LWP not in our
	     list, i.e. not part of any inferior we're debugging.
	     This can happen if we detach from a program we originally
	     forked and then it exits.  */
	}

      if (child == nullptr)
	return;
    }

  thread = get_lwp_thread (child);

  /* Whatever the event was, the LWP is known stopped now; record the
     raw status for later inspection.  */
  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      threads_debug_printf ("%d exited", lwpid);

      if (finish_step_over (child))
	{
	  /* Unsuspend all other LWPs, and set them back running again.  */
	  unsuspend_all_lwps (child);
	}

      /* If there is at least one more LWP, then the exit signal was
	 not the end of the debugged application and should be
	 ignored, unless GDB wants to hear about thread exits.  */
      if (cs.report_thread_events
	  || last_thread_of_process_p (pid_of (thread)))
	{
	  /* Since events are serialized to GDB core, and we can't
	     report this one right now.  Leave the status pending for
	     the next time we're able to report it.  */
	  mark_lwp_dead (child, wstat);
	  return;
	}
      else
	{
	  delete_lwp (child);
	  return;
	}
    }

  gdb_assert (WIFSTOPPED (wstat));

  /* NOTE(review): this WIFSTOPPED check is redundant with the
     gdb_assert just above -- at this point the status is always a
     stop.  */
  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return;
	    }
	}
    }

  /* Enable the ptrace event options on the first stop, now that the
     tracee exists and is stopped.  */
  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Always update syscall_state, even if it will be filtered later.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    {
      /* Syscall stops alternate: entry, return, entry, ...  */
      child->syscall_state
	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	   ? TARGET_WAITKIND_SYSCALL_RETURN
	   : TARGET_WAITKIND_SYSCALL_ENTRY);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in handle_extended_wait.  */
      child->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Be careful to not overwrite stop_pc until save_stop_reason is
     called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (&child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return;
	}
    }

  if (linux_wstatus_maybe_breakpoint (wstat))
    {
      if (save_stop_reason (child))
	have_stop_pc = 1;
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      threads_debug_printf ("Expected stop.");

      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
				target_pid_to_str (ptid_of (thread)).c_str ());
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
				target_pid_to_str (ptid_of (thread)).c_str ());
	  return;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
				child->stepping ? "step" : "continue",
				target_pid_to_str (ptid_of (thread)).c_str ());

	  resume_one_lwp (child, child->stepping, 0, NULL);
	  return;
	}
    }

  /* Everything else is left pending for later reporting.  */
  child->status_pending_p = 1;
  child->status_pending = wstat;
  return;
}
2346
b31cdfa6
TBA
2347bool
2348linux_process_target::maybe_hw_step (thread_info *thread)
f79b145d 2349{
b31cdfa6
TBA
2350 if (supports_hardware_single_step ())
2351 return true;
f79b145d
YQ
2352 else
2353 {
3b9a79ef 2354 /* GDBserver must insert single-step breakpoint for software
f79b145d 2355 single step. */
3b9a79ef 2356 gdb_assert (has_single_step_breakpoints (thread));
b31cdfa6 2357 return false;
f79b145d
YQ
2358 }
2359}
2360
df95181f
TBA
2361void
2362linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
20ba1ce6 2363{
20ba1ce6
PA
2364 struct lwp_info *lp = get_thread_lwp (thread);
2365
2366 if (lp->stopped
863d01bd 2367 && !lp->suspended
20ba1ce6 2368 && !lp->status_pending_p
183be222 2369 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
20ba1ce6 2370 {
8901d193
YQ
2371 int step = 0;
2372
2373 if (thread->last_resume_kind == resume_step)
2374 step = maybe_hw_step (thread);
20ba1ce6 2375
c058728c
SM
2376 threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
2377 target_pid_to_str (ptid_of (thread)).c_str (),
2378 paddress (lp->stop_pc), step);
20ba1ce6 2379
df95181f 2380 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
20ba1ce6
PA
2381 }
2382}
2383
d16f3f6c
TBA
/* Wait for an event reported by any LWP in the set matching
   FILTER_PTID, considering only LWPs matching WAIT_PTID as
   "resumed and waitable".  On success, stores the raw wait status in
   *WSTATP, makes the event thread the current thread, and returns
   its lwpid.  Returns -1 if no unwaited-for resumed LWP remains in
   WAIT_PTID's set; returns 0 if OPTIONS contains WNOHANG and no
   event is available.  */

int
linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
					       ptid_t filter_ptid,
					       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
    {
      /* Random selection prevents one busy LWP from starving the
	 others.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  threads_debug_printf ("Got a pending child %ld", lwpid_of (event_thread));
	}
    }
  else if (filter_ptid != null_ptid)
    {
      requested_child = find_lwp_pid (filter_ptid);

      /* A pending status stashed while mid fast-tracepoint-collect is
	 deferred and the LWP is set running again instead.  */
      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && (requested_child->collecting_fast_tracepoint
	      != fast_tpoint_collect_result::not_collecting))
	{
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error (__FILE__, __LINE__,
			  "requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      threads_debug_printf ("Got an event from pending child %ld (%04x)",
			    lwpid_of (event_thread),
			    event_child->status_pending);

      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      switch_to_thread (event_thread);
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
			    ret, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  threads_debug_printf ("waitpid %ld received %s",
				(long) ret, status_to_str (*wstatp).c_str ());

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_thread ([this] (thread_info *thread)
	  {
	    resume_stopped_resumed_lwps (thread);
	  });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      auto not_stopped = [&] (thread_info *thread)
	{
	  return not_stopped_callback (thread, wait_ptid);
	};

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if (find_thread (not_stopped) == NULL)
	{
	  threads_debug_printf ("exit (no unwaited-for LWP)");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  threads_debug_printf ("WNOHANG set, no event found");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      threads_debug_printf ("sigsuspend'ing");

      sigsuspend (&prev_mask);
      gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);

  switch_to_thread (event_thread);

  return lwpid_of (event_thread);
}
2581
d16f3f6c
TBA
/* Wait for an event from any LWP matching PTID, using PTID both as
   the set of LWPs waited on and as the pending-status filter.  Thin
   wrapper around wait_for_event_filtered; see that function for the
   return value convention.  */

int
linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  return wait_for_event_filtered (ptid, ptid, wstatp, options);
}
2587
6bf5e0ba
PA
/* Select one LWP out of those that have events pending.  On return,
   *ORIG_LP is updated to the chosen LWP; it is left untouched if no
   LWP has a pending event.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  struct thread_info *event_thread = NULL;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it's the LWP that
     the core is most interested in.  If we didn't do this, then we'd
     have to handle pending step SIGTRAPs somehow in case the core
     later continues the previously-stepped thread, otherwise we'd
     report the pending SIGTRAP, and the core, not having stepped the
     thread, wouldn't understand what the trap was for, and therefore
     would report it to the user as a random signal.  */
  if (!non_stop)
    {
      event_thread = find_thread ([] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
		  && thread->last_resume_kind == resume_step
		  && lp->status_pending_p);
	});

      if (event_thread != NULL)
	threads_debug_printf
	  ("Select single-step %s",
	   target_pid_to_str (ptid_of (event_thread)).c_str ());
    }
  if (event_thread == NULL)
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had events.  */

      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  /* Only resumed LWPs that have an event pending.  */
	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
		  && lp->status_pending_p);
	});
    }

  if (event_thread != NULL)
    {
      struct lwp_info *event_lp = get_thread_lwp (event_thread);

      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
2642
7984d532
PA
2643/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2644 NULL. */
2645
2646static void
2647unsuspend_all_lwps (struct lwp_info *except)
2648{
139720c5
SM
2649 for_each_thread ([&] (thread_info *thread)
2650 {
2651 lwp_info *lwp = get_thread_lwp (thread);
2652
2653 if (lwp != except)
2654 lwp_suspended_decr (lwp);
2655 });
7984d532
PA
2656}
2657
5a6b0a41 2658static bool lwp_running (thread_info *thread);
fa593d66
PA
2659
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

   - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

   - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

void
linux_process_target::stabilize_threads ()
{
  /* Bail out early if some thread is irrecoverably stuck in a jump
     pad; there is nothing useful to do then.  */
  thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
    {
      return stuck_in_jump_pad (thread);
    });

  if (thread_stuck != NULL)
    {
      threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
			    lwpid_of (thread_stuck));
      return;
    }

  scoped_restore_current_thread restore_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread ([this] (thread_info *thread)
    {
      move_out_of_jump_pad (thread);
    });

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait even loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  */
	  lwp_suspended_inc (lwp);

	  /* Defer any real signal (or an explicit stop request) so it
	     is reported once the thread is out of the jump pad.  */
	  if (ourstatus.sig () != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  /* Release the suspensions taken in the loop above.  */
  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  if (debug_threads)
    {
      thread_stuck = find_thread ([this] (thread_info *thread)
	{
	  return stuck_in_jump_pad (thread);
	});

      if (thread_stuck != NULL)
	threads_debug_printf
	  ("couldn't stabilize, LWP %ld got stuck in jump pad",
	   lwpid_of (thread_stuck));
    }
}
2760
582511be
PA
2761/* Convenience function that is called when the kernel reports an
2762 event that is not passed out to GDB. */
2763
2764static ptid_t
2765ignore_event (struct target_waitstatus *ourstatus)
2766{
2767 /* If we got an event, there may still be others, as a single
2768 SIGCHLD can indicate more than one child stopped. This forces
2769 another target_wait call. */
2770 async_file_mark ();
2771
183be222 2772 ourstatus->set_ignore ();
582511be
PA
2773 return null_ptid;
2774}
2775
fd000fb3
TBA
2776ptid_t
2777linux_process_target::filter_exit_event (lwp_info *event_child,
2778 target_waitstatus *ourstatus)
65706a29 2779{
c12a5089 2780 client_state &cs = get_client_state ();
65706a29
PA
2781 struct thread_info *thread = get_lwp_thread (event_child);
2782 ptid_t ptid = ptid_of (thread);
2783
2784 if (!last_thread_of_process_p (pid_of (thread)))
2785 {
c12a5089 2786 if (cs.report_thread_events)
183be222 2787 ourstatus->set_thread_exited (0);
65706a29 2788 else
183be222 2789 ourstatus->set_ignore ();
65706a29
PA
2790
2791 delete_lwp (event_child);
2792 }
2793 return ptid;
2794}
2795
82075af2
JS
2796/* Returns 1 if GDB is interested in any event_child syscalls. */
2797
2798static int
2799gdb_catching_syscalls_p (struct lwp_info *event_child)
2800{
2801 struct thread_info *thread = get_lwp_thread (event_child);
2802 struct process_info *proc = get_thread_process (thread);
2803
f27866ba 2804 return !proc->syscalls_to_catch.empty ();
82075af2
JS
2805}
2806
9eedd27d
TBA
2807bool
2808linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
82075af2 2809{
4cc32bec 2810 int sysno;
82075af2
JS
2811 struct thread_info *thread = get_lwp_thread (event_child);
2812 struct process_info *proc = get_thread_process (thread);
2813
f27866ba 2814 if (proc->syscalls_to_catch.empty ())
9eedd27d 2815 return false;
82075af2 2816
f27866ba 2817 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
9eedd27d 2818 return true;
82075af2 2819
4cc32bec 2820 get_syscall_trapinfo (event_child, &sysno);
f27866ba
SM
2821
2822 for (int iter : proc->syscalls_to_catch)
82075af2 2823 if (iter == sysno)
9eedd27d 2824 return true;
82075af2 2825
9eedd27d 2826 return false;
82075af2
JS
2827}
2828
/* One pass of the wait implementation: wait for an event from the
   inferior (or pick up a pending one), filter out events that are
   internal to gdbserver (our own breakpoints, step-over completion,
   tracepoint collection, ignored signals and syscalls) -- resuming the
   inferior and returning TARGET_WAITKIND_IGNORE for those -- and
   otherwise fill in OURSTATUS and return the ptid of the thread the
   event is for.  Returns null_ptid when there is nothing to report.  */

ptid_t
linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
			      target_wait_flags target_options)
{
  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  client_state &cs = get_client_state ();
  int w;			/* Raw wait status from waitpid.  */
  struct lwp_info *event_child;	/* LWP the event is for.  */
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;
  int in_step_range;
  int any_resumed;

  threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

  bp_explains_trap = 0;
  trace_event = 0;
  in_step_range = 0;
  ourstatus->set_ignore ();

  auto status_pending_p_any = [&] (thread_info *thread)
    {
      return status_pending_p_callback (thread, minus_one_ptid);
    };

  auto not_stopped = [&] (thread_info *thread)
    {
      return not_stopped_callback (thread, minus_one_ptid);
    };

  /* Find a resumed LWP, if any.  */
  if (find_thread (status_pending_p_any) != NULL)
    any_resumed = 1;
  else if (find_thread (not_stopped) != NULL)
    any_resumed = 1;
  else
    any_resumed = 0;

  /* While a step-over is in progress, only events from the stepping
     thread are of interest -- wait for it with a blocking wait.  */
  if (step_over_bkpt == null_ptid)
    pid = wait_for_event (ptid, &w, options);
  else
    {
      threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
			    target_pid_to_str (step_over_bkpt).c_str ());
      pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0 || (pid == -1 && !any_resumed))
    {
      gdb_assert (target_options & TARGET_WNOHANG);

      threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");

      ourstatus->set_ignore ();
      return null_ptid;
    }
  else if (pid == -1)
    {
      threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");

      ourstatus->set_no_resumed ();
      return null_ptid;
    }

  event_child = get_thread_lwp (current_thread);

  /* wait_for_event only returns an exit status for the last
     child of a process.  Report it.  */
  if (WIFEXITED (w) || WIFSIGNALED (w))
    {
      if (WIFEXITED (w))
	{
	  ourstatus->set_exited (WEXITSTATUS (w));

	  threads_debug_printf
	    ("ret = %s, exited with retcode %d",
	     target_pid_to_str (ptid_of (current_thread)).c_str (),
	     WEXITSTATUS (w));
	}
      else
	{
	  ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));

	  threads_debug_printf
	    ("ret = %s, terminated with signal %d",
	     target_pid_to_str (ptid_of (current_thread)).c_str (),
	     WTERMSIG (w));
	}

      if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
	return filter_exit_event (event_child, ourstatus);

      return ptid_of (current_thread);
    }

  /* If step-over executes a breakpoint instruction, in the case of a
     hardware single step it means a gdb/gdbserver breakpoint had been
     planted on top of a permanent breakpoint, in the case of a software
     single step it may just mean that gdbserver hit the reinsert breakpoint.
     The PC has been adjusted by save_stop_reason to point at
     the breakpoint address.
     So in the case of the hardware single step advance the PC manually
     past the breakpoint and in the case of software single step advance only
     if it's not the single_step_breakpoint we are hitting.
     This avoids that a program would keep trapping a permanent breakpoint
     forever.  */
  if (step_over_bkpt != null_ptid
      && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && (event_child->stepping
	  || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
    {
      int increment_pc = 0;
      int breakpoint_kind = 0;
      CORE_ADDR stop_pc = event_child->stop_pc;

      breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
      sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);

      threads_debug_printf
	("step-over for %s executed software breakpoint",
	 target_pid_to_str (ptid_of (current_thread)).c_str ());

      if (increment_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);

	  event_child->stop_pc += increment_pc;
	  low_set_pc (regcache, event_child->stop_pc);

	  if (!low_breakpoint_at (event_child->stop_pc))
	    event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (low_supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && low_breakpoint_at (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any single-step
	 breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	threads_debug_printf ("Hit a gdbserver breakpoint.");
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* If we're collecting a fast tracepoint, finish the collection and
     move out of the jump pad before delivering a signal.  See
     linux_stabilize_threads.  */

  if (WIFSTOPPED (w)
      && WSTOPSIG (w) != SIGTRAP
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      threads_debug_printf ("Got signal %d for LWP %ld.  Check if we need "
			    "to defer or adjust it.",
			    WSTOPSIG (w), lwpid_of (current_thread));

      /* Allow debugging the jump pad itself.  */
      if (current_thread->last_resume_kind != resume_step
	  && maybe_move_out_of_jump_pad (event_child, &w))
	{
	  enqueue_one_deferred_signal (event_child, &w);

	  threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
				WSTOPSIG (w), lwpid_of (current_thread));

	  resume_one_lwp (event_child, 0, 0, NULL);

	  return ignore_event (ourstatus);
	}
    }

  if (event_child->collecting_fast_tracepoint
      != fast_tpoint_collect_result::not_collecting)
    {
      threads_debug_printf
	("LWP %ld was trying to move out of the jump pad (%d).  "
	 "Check if we're already there.",
	 lwpid_of (current_thread),
	 (int) event_child->collecting_fast_tracepoint);

      trace_event = 1;

      event_child->collecting_fast_tracepoint
	= linux_fast_tracepoint_collecting (event_child, NULL);

      if (event_child->collecting_fast_tracepoint
	  != fast_tpoint_collect_result::before_insn)
	{
	  /* No longer need this breakpoint.  */
	  if (event_child->exit_jump_pad_bkpt != NULL)
	    {
	      threads_debug_printf
		("No longer need exit-jump-pad bkpt; removing it."
		 "stopping all threads momentarily.");

	      /* Other running threads could hit this breakpoint.
		 We don't handle moribund locations like GDB does,
		 instead we always pause all threads when removing
		 breakpoints, so that any step-over or
		 decr_pc_after_break adjustment is always taken
		 care of while the breakpoint is still
		 inserted.  */
	      stop_all_lwps (1, event_child);

	      delete_breakpoint (event_child->exit_jump_pad_bkpt);
	      event_child->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, event_child);

	      gdb_assert (event_child->suspended >= 0);
	    }
	}

      if (event_child->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting)
	{
	  threads_debug_printf
	    ("fast tracepoint finished collecting successfully.");

	  /* We may have a deferred signal to report.  */
	  if (dequeue_one_deferred_signal (event_child, &w))
	    threads_debug_printf ("dequeued one signal.");
	  else
	    {
	      threads_debug_printf ("no deferred signals.");

	      if (stabilizing_threads)
		{
		  ourstatus->set_stopped (GDB_SIGNAL_0);

		  threads_debug_printf
		    ("ret = %s, stopped while stabilizing threads",
		     target_pid_to_str (ptid_of (current_thread)).c_str ());

		  return ptid_of (current_thread);
		}
	    }
	}
    }

  /* Check whether GDB would be interested in this event.  */

  /* Check if GDB is interested in this syscall.  */
  if (WIFSTOPPED (w)
      && WSTOPSIG (w) == SYSCALL_SIGTRAP
      && !gdb_catch_this_syscall (event_child))
    {
      threads_debug_printf ("Ignored syscall for LWP %ld.",
			    lwpid_of (current_thread));

      resume_one_lwp (event_child, event_child->stepping, 0, NULL);

      return ignore_event (ourstatus);
    }

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler.  Also never ignore signals that could be caused by a
     breakpoint.  */
  if (WIFSTOPPED (w)
      && current_thread->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
	  (current_process ()->priv->thread_db != NULL
	   && (WSTOPSIG (w) == __SIGRTMIN
	       || WSTOPSIG (w) == __SIGRTMIN + 1))
	  ||
#endif
	  (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
	   && !(WSTOPSIG (w) == SIGSTOP
		&& current_thread->last_resume_kind == resume_stop)
	   && !linux_wstatus_maybe_breakpoint (w))))
    {
      siginfo_t info, *info_p;

      threads_debug_printf ("Ignored signal %d for LWP %ld.",
			    WSTOPSIG (w), lwpid_of (current_thread));

      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
		  (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      if (step_over_finished)
	{
	  /* We cancelled this thread's step-over above.  We still
	     need to unsuspend all other LWPs, and set them back
	     running again while the signal handler runs.  */
	  unsuspend_all_lwps (event_child);

	  /* Enqueue the pending signal info so that proceed_all_lwps
	     doesn't lose it.  */
	  enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);

	  proceed_all_lwps ();
	}
      else
	{
	  resume_one_lwp (event_child, event_child->stepping,
			  WSTOPSIG (w), info_p);
	}

      return ignore_event (ourstatus);
    }

  /* Note that all addresses are always "out of the step range" when
     there's no range to begin with.  */
  in_step_range = lwp_in_step_range (event_child);

  /* If GDB wanted this thread to single step, and the thread is out
     of the step range, we always want to report the SIGTRAP, and let
     GDB handle it.  Watchpoints should always be reported.  So should
     signals we can't explain.  A SIGTRAP we can't explain could be a
     GDB breakpoint --- we may or not support Z0 breakpoints.  If we
     do, we'll be able to handle GDB breakpoints on top of internal
     breakpoints, by handling the internal breakpoint and still
     reporting the event to GDB.  If we don't, we're out of luck, GDB
     won't see the breakpoint hit.  If we see a single-step event but
     the thread should be continuing, don't pass the trap to gdb.
     That indicates that we had previously finished a single-step but
     left the single-step pending -- see
     complete_ongoing_step_over.  */
  report_to_gdb = (!maybe_internal_trap
		   || (current_thread->last_resume_kind == resume_step
		       && !in_step_range)
		   || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
		   || (!in_step_range
		       && !bp_explains_trap
		       && !trace_event
		       && !step_over_finished
		       && !(current_thread->last_resume_kind == resume_continue
			    && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
		   || (gdb_breakpoint_here (event_child->stop_pc)
		       && gdb_condition_true_at_breakpoint (event_child->stop_pc)
		       && gdb_no_commands_at_breakpoint (event_child->stop_pc))
		   || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);

  run_breakpoint_commands (event_child->stop_pc);

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (bp_explains_trap)
	threads_debug_printf ("Hit a gdbserver breakpoint.");

      if (step_over_finished)
	threads_debug_printf ("Step-over finished.");

      if (trace_event)
	threads_debug_printf ("Tracepoint event.");

      if (lwp_in_step_range (event_child))
	threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
			      paddress (event_child->stop_pc),
			      paddress (event_child->step_range_start),
			      paddress (event_child->step_range_end));

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (low_supports_breakpoints ())
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc);
	}

      if (step_over_finished)
	{
	  /* If we have finished stepping over a breakpoint, we've
	     stopped and suspended all LWPs momentarily except the
	     stepping one.  This is where we resume them all again.
	     We're going to keep waiting, so use proceed, which
	     handles stepping over the next breakpoint.  */
	  unsuspend_all_lwps (event_child);
	}
      else
	{
	  /* Remove the single-step breakpoints if any.  Note that
	     there isn't a single-step breakpoint if we finished stepping
	     over.  */
	  if (supports_software_single_step ()
	      && has_single_step_breakpoints (current_thread))
	    {
	      stop_all_lwps (0, event_child);
	      delete_single_step_breakpoints (current_thread);
	      unstop_all_lwps (0, event_child);
	    }
	}

      threads_debug_printf ("proceeding all threads.");

      proceed_all_lwps ();

      return ignore_event (ourstatus);
    }

  if (debug_threads)
    {
      if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
	threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
			      lwpid_of (get_lwp_thread (event_child)),
			      event_child->waitstatus.to_string ().c_str ());

      if (current_thread->last_resume_kind == resume_step)
	{
	  if (event_child->step_range_start == event_child->step_range_end)
	    threads_debug_printf
	      ("GDB wanted to single-step, reporting event.");
	  else if (!lwp_in_step_range (event_child))
	    threads_debug_printf ("Out of step range, reporting event.");
	}

      if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
	threads_debug_printf ("Stopped by watchpoint.");
      else if (gdb_breakpoint_here (event_child->stop_pc))
	threads_debug_printf ("Stopped by GDB breakpoint.");
    }

  threads_debug_printf ("Hit a non-gdbserver trap event.");

  /* Alright, we're going to report a stop.  */

  /* Remove single-step breakpoints.  */
  if (supports_software_single_step ())
    {
      /* Remove single-step breakpoints or not.  If it is true, stop all
	 lwps, so that other threads won't hit the breakpoint in the
	 stale memory.  */
      int remove_single_step_breakpoints_p = 0;

      if (non_stop)
	{
	  remove_single_step_breakpoints_p
	    = has_single_step_breakpoints (current_thread);
	}
      else
	{
	  /* In all-stop, a stop reply cancels all previous resume
	     requests.  Delete all single-step breakpoints.  */

	  find_thread ([&] (thread_info *thread) {
	    if (has_single_step_breakpoints (thread))
	      {
		remove_single_step_breakpoints_p = 1;
		return true;
	      }

	    return false;
	  });
	}

      if (remove_single_step_breakpoints_p)
	{
	  /* If we remove single-step breakpoints from memory, stop all lwps,
	     so that other threads won't hit the breakpoint in the stale
	     memory.  */
	  stop_all_lwps (0, event_child);

	  if (non_stop)
	    {
	      gdb_assert (has_single_step_breakpoints (current_thread));
	      delete_single_step_breakpoints (current_thread);
	    }
	  else
	    {
	      for_each_thread ([] (thread_info *thread){
		if (has_single_step_breakpoints (thread))
		  delete_single_step_breakpoints (thread);
	      });
	    }

	  unstop_all_lwps (0, event_child);
	}
    }

  if (!stabilizing_threads)
    {
      /* In all-stop, stop all threads.  */
      if (!non_stop)
	stop_all_lwps (0, NULL);

      if (step_over_finished)
	{
	  if (!non_stop)
	    {
	      /* If we were doing a step-over, all other threads but
		 the stepping one had been paused in start_step_over,
		 with their suspend counts incremented.  We don't want
		 to do a full unstop/unpause, because we're in
		 all-stop mode (so we want threads stopped), but we
		 still need to unsuspend the other threads, to
		 decrement their `suspended' count back.  */
	      unsuspend_all_lwps (event_child);
	    }
	  else
	    {
	      /* If we just finished a step-over, then all threads had
		 been momentarily paused.  In all-stop, that's fine,
		 we want threads stopped by now anyway.  In non-stop,
		 we need to re-resume threads that GDB wanted to be
		 running.  */
	      unstop_all_lwps (1, event_child);
	    }
	}

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid == minus_one_ptid)
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  /* current_thread and event_child must stay in sync.  */
	  switch_to_thread (get_lwp_thread (event_child));

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}


      /* Stabilize threads (move out of jump pads).  */
      if (!non_stop)
	target_stabilize_threads ();
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (1, event_child);
    }

  if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    {
      /* If the reported event is an exit, fork, vfork or exec, let
	 GDB know.  */

      /* Break the unreported fork relationship chain.  */
      if (event_child->waitstatus.kind () == TARGET_WAITKIND_FORKED
	  || event_child->waitstatus.kind () == TARGET_WAITKIND_VFORKED)
	{
	  event_child->fork_relative->fork_relative = NULL;
	  event_child->fork_relative = NULL;
	}

      *ourstatus = event_child->waitstatus;
      /* Clear the event lwp's waitstatus since we handled it already.  */
      event_child->waitstatus.set_ignore ();
    }
  else
    {
      /* The actual stop signal is overwritten below.  */
      ourstatus->set_stopped (GDB_SIGNAL_0);
    }

  /* Now that we've selected our final event LWP, un-adjust its PC if
     it was a software breakpoint, and the client doesn't know we can
     adjust the breakpoint ourselves.  */
  if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && !cs.swbreak_feature)
    {
      int decr_pc = low_decr_pc_after_break ();

      if (decr_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc + decr_pc);
	}
    }

  if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
    {
      int syscall_number;

      get_syscall_trapinfo (event_child, &syscall_number);
      if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
	ourstatus->set_syscall_entry (syscall_number);
      else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
	ourstatus->set_syscall_return (syscall_number);
      else
	gdb_assert_not_reached ("unexpected syscall state");
    }
  else if (current_thread->last_resume_kind == resume_stop
	   && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->set_stopped (GDB_SIGNAL_0);
    }
  else if (current_thread->last_resume_kind == resume_stop
	   && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons.  */
      ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
    }
  else if (ourstatus->kind () == TARGET_WAITKIND_STOPPED)
    ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));

  gdb_assert (step_over_bkpt == null_ptid);

  threads_debug_printf ("ret = %s, %s",
			target_pid_to_str (ptid_of (current_thread)).c_str (),
			ourstatus->to_string ().c_str ());

  if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
    return filter_exit_event (event_child, ourstatus);

  return ptid_of (current_thread);
}
3509
/* Get rid of any pending event in the pipe.  Drains the wakeup
   marker(s) previously written by async_file_mark, so the event loop
   does not spin on stale notifications.  */
static void
async_file_flush (void)
{
  linux_event_pipe.flush ();
}
3516
/* Put something in the pipe, so the event loop wakes up.  The
   counterpart of async_file_flush above.  */
static void
async_file_mark (void)
{
  linux_event_pipe.mark ();
}
3523
6532e7e3
TBA
3524ptid_t
3525linux_process_target::wait (ptid_t ptid,
3526 target_waitstatus *ourstatus,
b60cea74 3527 target_wait_flags target_options)
bd99dc85 3528{
95954743 3529 ptid_t event_ptid;
bd99dc85 3530
bd99dc85
PA
3531 /* Flush the async file first. */
3532 if (target_is_async_p ())
3533 async_file_flush ();
3534
582511be
PA
3535 do
3536 {
d16f3f6c 3537 event_ptid = wait_1 (ptid, ourstatus, target_options);
582511be
PA
3538 }
3539 while ((target_options & TARGET_WNOHANG) == 0
d7e15655 3540 && event_ptid == null_ptid
183be222 3541 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3542
3543 /* If at least one stop was reported, there may be more. A single
3544 SIGCHLD can signal more than one child stop. */
3545 if (target_is_async_p ()
3546 && (target_options & TARGET_WNOHANG) != 0
d7e15655 3547 && event_ptid != null_ptid)
bd99dc85
PA
3548 async_file_mark ();
3549
3550 return event_ptid;
da6d8c04
DJ
3551}
3552
c5f62d5f 3553/* Send a signal to an LWP. */
fd500816
DJ
3554
3555static int
a1928bad 3556kill_lwp (unsigned long lwpid, int signo)
fd500816 3557{
4a6ed09b 3558 int ret;
fd500816 3559
4a6ed09b
PA
3560 errno = 0;
3561 ret = syscall (__NR_tkill, lwpid, signo);
3562 if (errno == ENOSYS)
3563 {
3564 /* If tkill fails, then we are not using nptl threads, a
3565 configuration we no longer support. */
3566 perror_with_name (("tkill"));
3567 }
3568 return ret;
fd500816
DJ
3569}
3570
/* Exported entry point: ask LWP to stop by queueing it a SIGSTOP via
   send_sigstop.  NOTE(review): presumably declared in a shared nat
   header for use outside this file -- confirm against the headers.  */
void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3576
0d62e5e8 3577static void
02fc4de7 3578send_sigstop (struct lwp_info *lwp)
0d62e5e8 3579{
bd99dc85 3580 int pid;
0d62e5e8 3581
d86d4aaf 3582 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3583
0d62e5e8
DJ
3584 /* If we already have a pending stop signal for this process, don't
3585 send another. */
54a0b537 3586 if (lwp->stop_expected)
0d62e5e8 3587 {
c058728c 3588 threads_debug_printf ("Have pending sigstop for lwp %d", pid);
ae13219e 3589
0d62e5e8
DJ
3590 return;
3591 }
3592
c058728c 3593 threads_debug_printf ("Sending sigstop to lwp %d", pid);
0d62e5e8 3594
d50171e4 3595 lwp->stop_expected = 1;
bd99dc85 3596 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3597}
3598
df3e4dbe
SM
3599static void
3600send_sigstop (thread_info *thread, lwp_info *except)
02fc4de7 3601{
d86d4aaf 3602 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3603
7984d532
PA
3604 /* Ignore EXCEPT. */
3605 if (lwp == except)
df3e4dbe 3606 return;
7984d532 3607
02fc4de7 3608 if (lwp->stopped)
df3e4dbe 3609 return;
02fc4de7
PA
3610
3611 send_sigstop (lwp);
7984d532
PA
3612}
3613
3614/* Increment the suspend count of an LWP, and stop it, if not stopped
3615 yet. */
df3e4dbe
SM
3616static void
3617suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
7984d532 3618{
d86d4aaf 3619 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3620
3621 /* Ignore EXCEPT. */
3622 if (lwp == except)
df3e4dbe 3623 return;
7984d532 3624
863d01bd 3625 lwp_suspended_inc (lwp);
7984d532 3626
df3e4dbe 3627 send_sigstop (thread, except);
02fc4de7
PA
3628}
3629
95954743
PA
3630static void
3631mark_lwp_dead (struct lwp_info *lwp, int wstat)
3632{
95954743
PA
3633 /* Store the exit status for later. */
3634 lwp->status_pending_p = 1;
3635 lwp->status_pending = wstat;
3636
00db26fa
PA
3637 /* Store in waitstatus as well, as there's nothing else to process
3638 for this event. */
3639 if (WIFEXITED (wstat))
183be222 3640 lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
00db26fa 3641 else if (WIFSIGNALED (wstat))
183be222 3642 lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
00db26fa 3643
95954743
PA
3644 /* Prevent trying to stop it. */
3645 lwp->stopped = 1;
3646
3647 /* No further stops are expected from a dead lwp. */
3648 lwp->stop_expected = 0;
3649}
3650
00db26fa
PA
3651/* Return true if LWP has exited already, and has a pending exit event
3652 to report to GDB. */
3653
3654static int
3655lwp_is_marked_dead (struct lwp_info *lwp)
3656{
3657 return (lwp->status_pending_p
3658 && (WIFEXITED (lwp->status_pending)
3659 || WIFSIGNALED (lwp->status_pending)));
3660}
3661
/* Drain all pending wait events from the kernel, leaving them pending
   on each LWP, until every unwaited-for child has reported in.  Used
   after SIGSTOPs have been broadcast by stop_all_lwps.  If the
   previously-current thread dies while we wait, switch away from it so
   later commands do not apply to the wrong process.  */

void
linux_process_target::wait_for_sigstop ()
{
  struct thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  /* Capture the current thread's identity before pulling events; it
     may die while we wait, after which the thread_info is gone.  */
  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  scoped_restore_current_thread restore_thread;

  threads_debug_printf ("pulling events");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
  gdb_assert (ret == -1);

  /* If the saved thread is still alive (or there was none), the
     scoped restore puts it back on scope exit.  */
  if (saved_thread == NULL || mythread_alive (saved_tid))
    return;
  else
    {
      threads_debug_printf ("Previously current thread died.");

      /* We can't change the current inferior behind GDB's back,
	 otherwise, a subsequent command may apply to the wrong
	 process.  */
      restore_thread.dont_restore ();
      switch_to_thread (nullptr);
    }
}
3699
/* Return true if THREAD's LWP is stopped inside a fast tracepoint jump
   pad (or its collection routine) at a spot the user may want to debug
   -- a GDB breakpoint, a watchpoint stop, or a single-step request --
   so the caller should not try to move it out of the pad.  */

bool
linux_process_target::stuck_in_jump_pad (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* Callers must only ask about non-suspended, stopped LWPs.  */
  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* Allow debugging the jump pad, gdb_collect, etc..  */
  return (supports_fast_tracepoints ()
	  && agent_loaded_p ()
	  && (gdb_breakpoint_here (lwp->stop_pc)
	      || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
	      || thread->last_resume_kind == resume_step)
	  && (linux_fast_tracepoint_collecting (lwp, NULL)
	      != fast_tpoint_collect_result::not_collecting));
}
3722
/* If THREAD's LWP is stopped inside a fast tracepoint jump pad and the
   user is not interested in that spot (no GDB breakpoint, watchpoint
   stop, or step request), start moving it out of the pad: defer any
   pending signal and resume the LWP so it can finish collecting.
   Otherwise, keep it where it is and bump its suspend count.  */

void
linux_process_target::move_out_of_jump_pad (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int *wstat;

  /* Callers must only hand us non-suspended, stopped LWPs.  */
  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* For gdb_breakpoint_here.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* Pending wait status, if any, that may need to be deferred.  */
  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
			    lwpid_of (thread));

      /* Defer the pending signal so it is redelivered once the LWP is
	 out of the jump pad.  */
      if (wstat)
	{
	  lwp->status_pending_p = 0;
	  enqueue_one_deferred_signal (lwp, wstat);

	  threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad",
				WSTOPSIG (*wstat), lwpid_of (thread));
	}

      /* Let the LWP run until it exits the pad (it will hit the
	 exit-jump-pad breakpoint).  */
      resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    lwp_suspended_inc (lwp);
}
3766
5a6b0a41
SM
3767static bool
3768lwp_running (thread_info *thread)
fa593d66 3769{
d86d4aaf 3770 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3771
00db26fa 3772 if (lwp_is_marked_dead (lwp))
5a6b0a41
SM
3773 return false;
3774
3775 return !lwp->stopped;
fa593d66
PA
3776}
3777
/* Stop all LWPs except EXCEPT (which may be NULL), by broadcasting
   SIGSTOP and waiting until every LWP has reported a stop.  If SUSPEND
   is nonzero, also bump each stopped LWP's suspend count.  Sets the
   global stopping_threads state for the duration so event handling
   knows a stop-all is in progress.  Must not be called recursively.  */

void
linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
{
  /* Should not be called recursively.  */
  gdb_assert (stopping_threads == NOT_STOPPING_THREADS);

  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  threads_debug_printf
    ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
     (except != NULL
      ? target_pid_to_str (ptid_of (get_lwp_thread (except))).c_str ()
      : "none"));

  /* Publish the stopping state before sending any SIGSTOPs.  */
  stopping_threads = (suspend
		      ? STOPPING_AND_SUSPENDING_THREADS
		      : STOPPING_THREADS);

  if (suspend)
    for_each_thread ([&] (thread_info *thread)
      {
	suspend_and_send_sigstop (thread, except);
      });
  else
    for_each_thread ([&] (thread_info *thread)
      {
	send_sigstop (thread, except);
      });

  /* Wait until every LWP has actually reported a stop.  */
  wait_for_sigstop ();
  stopping_threads = NOT_STOPPING_THREADS;

  threads_debug_printf ("setting stopping_threads back to !stopping");
}
3812
863d01bd
PA
3813/* Enqueue one signal in the chain of signals which need to be
3814 delivered to this process on next resume. */
3815
3816static void
3817enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3818{
013e3554
TBA
3819 lwp->pending_signals.emplace_back (signal);
3820 if (info == nullptr)
3821 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
863d01bd 3822 else
013e3554 3823 lwp->pending_signals.back ().info = *info;
863d01bd
PA
3824}
3825
df95181f
TBA
3826void
3827linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
fa5308bd 3828{
984a2c04
YQ
3829 struct thread_info *thread = get_lwp_thread (lwp);
3830 struct regcache *regcache = get_thread_regcache (thread, 1);
8ce47547 3831
24583e45 3832 scoped_restore_current_thread restore_thread;
984a2c04 3833
24583e45 3834 switch_to_thread (thread);
7582c77c 3835 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
fa5308bd 3836
a0ff9e1a 3837 for (CORE_ADDR pc : next_pcs)
3b9a79ef 3838 set_single_step_breakpoint (pc, current_ptid);
fa5308bd
AT
3839}
3840
df95181f
TBA
3841int
3842linux_process_target::single_step (lwp_info* lwp)
7fe5e27e
AT
3843{
3844 int step = 0;
3845
b31cdfa6 3846 if (supports_hardware_single_step ())
7fe5e27e
AT
3847 {
3848 step = 1;
3849 }
7582c77c 3850 else if (supports_software_single_step ())
7fe5e27e
AT
3851 {
3852 install_software_single_step_breakpoints (lwp);
3853 step = 0;
3854 }
3855 else
c058728c 3856 threads_debug_printf ("stepping is not implemented on this target");
7fe5e27e
AT
3857
3858 return step;
3859}
3860
35ac8b3e 3861/* The signal can be delivered to the inferior if we are not trying to
5b061e98
YQ
3862 finish a fast tracepoint collect. Since signal can be delivered in
3863 the step-over, the program may go to signal handler and trap again
3864 after return from the signal handler. We can live with the spurious
3865 double traps. */
35ac8b3e
YQ
3866
3867static int
3868lwp_signal_can_be_delivered (struct lwp_info *lwp)
3869{
229d26fc
SM
3870 return (lwp->collecting_fast_tracepoint
3871 == fast_tpoint_collect_result::not_collecting);
35ac8b3e
YQ
3872}
3873
/* Resume execution of LWP.  If STEP is nonzero, single-step it.  If
   SIGNAL is nonzero, give it that signal (enqueueing it instead if it
   cannot be delivered right now).  INFO, if non-NULL, is the siginfo
   for SIGNAL.  Throws on ptrace errors; see resume_one_lwp for the
   variant that tolerates the LWP having died.  */

void
linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
					    int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int ptrace_request;
  struct process_info *proc = get_thread_process (thread);

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  /* Nothing to do if the LWP is already running.  */
  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || !lwp->pending_signals.empty ()
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  /* A pending status takes precedence over resuming; the event will
     be reported instead.  */
  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Not resuming lwp %ld (%s, stop %s); has pending status",
	 lwpid_of (thread), step ? "step" : "continue",
	 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      threads_debug_printf ("  pending reinsert at 0x%s",
			    paddress (lwp->bp_reinsert));

      if (supports_hardware_single_step ())
	{
	  /* Sanity-check step-over invariants (only meaningful when
	     not in a fast tracepoint collect).  */
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    threads_debug_printf
      ("lwp %ld wants to get out of fast tracepoint jump pad "
       "(exit-jump-pad-bkpt)", lwpid_of (thread));

  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      threads_debug_printf
	("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
	 lwpid_of (thread));

      if (supports_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error (__FILE__, __LINE__,
			  "moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      threads_debug_printf
	("lwp %ld has a while-stepping action -> forcing step.",
	 lwpid_of (thread));

      step = single_step (lwp);
    }

  /* Record the PC we are resuming from, for later breakpoint-hit
     bookkeeping.  Guarded by proc->tdesc (see note above).  */
  if (proc->tdesc != NULL && low_supports_breakpoints ())
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      lwp->stop_pc = low_get_pc (regcache);

      threads_debug_printf ("  %s from pc 0x%lx", step ? "step" : "continue",
			    (long) lwp->stop_pc);
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
    {
      const pending_signal &p_sig = lwp->pending_signals.front ();

      signal = p_sig.signal;
      /* Only restore a captured siginfo (si_signo != 0 marks a real
	 one; see enqueue_pending_signal).  */
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals.pop_front ();
    }

  threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
			lwpid_of (thread), step ? "step" : "continue", signal,
			lwp->stop_expected ? "expected" : "not expected");

  low_prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  /* Choose the ptrace resume request: single-step, syscall-stop, or
     plain continue.  */
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  if (errno)
    perror_with_name ("resuming thread");

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
4062
d7599cc0
TBA
4063void
4064linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4065{
4066 /* Nop. */
4067}
4068
23f238d3
PA
4069/* Called when we try to resume a stopped LWP and that errors out. If
4070 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4071 or about to become), discard the error, clear any pending status
4072 the LWP may have, and return true (we'll collect the exit status
4073 soon enough). Otherwise, return false. */
4074
4075static int
4076check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4077{
4078 struct thread_info *thread = get_lwp_thread (lp);
4079
4080 /* If we get an error after resuming the LWP successfully, we'd
4081 confuse !T state for the LWP being gone. */
4082 gdb_assert (lp->stopped);
4083
4084 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4085 because even if ptrace failed with ESRCH, the tracee may be "not
4086 yet fully dead", but already refusing ptrace requests. In that
4087 case the tracee has 'R (Running)' state for a little bit
4088 (observed in Linux 3.18). See also the note on ESRCH in the
4089 ptrace(2) man page. Instead, check whether the LWP has any state
4090 other than ptrace-stopped. */
4091
4092 /* Don't assume anything if /proc/PID/status can't be read. */
4093 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 4094 {
23f238d3
PA
4095 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4096 lp->status_pending_p = 0;
4097 return 1;
4098 }
4099 return 0;
4100}
4101
df95181f
TBA
4102void
4103linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4104 siginfo_t *info)
23f238d3 4105{
a70b8144 4106 try
23f238d3 4107 {
df95181f 4108 resume_one_lwp_throw (lwp, step, signal, info);
23f238d3 4109 }
230d2906 4110 catch (const gdb_exception_error &ex)
23f238d3
PA
4111 {
4112 if (!check_ptrace_stopped_lwp_gone (lwp))
eedc3f4f 4113 throw;
3221518c 4114 }
da6d8c04
DJ
4115}
4116
5fdda392
SM
4117/* This function is called once per thread via for_each_thread.
4118 We look up which resume request applies to THREAD and mark it with a
4119 pointer to the appropriate resume request.
5544ad89
DJ
4120
4121 This algorithm is O(threads * resume elements), but resume elements
4122 is small (and will remain small at least until GDB supports thread
4123 suspension). */
ebcf782c 4124
5fdda392
SM
4125static void
4126linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
0d62e5e8 4127{
d86d4aaf 4128 struct lwp_info *lwp = get_thread_lwp (thread);
64386c31 4129
5fdda392 4130 for (int ndx = 0; ndx < n; ndx++)
95954743 4131 {
5fdda392 4132 ptid_t ptid = resume[ndx].thread;
d7e15655 4133 if (ptid == minus_one_ptid
9c80ecd6 4134 || ptid == thread->id
0c9070b3
YQ
4135 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4136 of PID'. */
e99b03dc 4137 || (ptid.pid () == pid_of (thread)
0e998d96 4138 && (ptid.is_pid ()
e38504b3 4139 || ptid.lwp () == -1)))
95954743 4140 {
5fdda392 4141 if (resume[ndx].kind == resume_stop
8336d594 4142 && thread->last_resume_kind == resume_stop)
d50171e4 4143 {
c058728c
SM
4144 threads_debug_printf
4145 ("already %s LWP %ld at GDB's request",
4146 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
4147 ? "stopped" : "stopping"),
4148 lwpid_of (thread));
d50171e4
PA
4149
4150 continue;
4151 }
4152
5a04c4cf
PA
4153 /* Ignore (wildcard) resume requests for already-resumed
4154 threads. */
5fdda392 4155 if (resume[ndx].kind != resume_stop
5a04c4cf
PA
4156 && thread->last_resume_kind != resume_stop)
4157 {
c058728c
SM
4158 threads_debug_printf
4159 ("already %s LWP %ld at GDB's request",
4160 (thread->last_resume_kind == resume_step
4161 ? "stepping" : "continuing"),
4162 lwpid_of (thread));
5a04c4cf
PA
4163 continue;
4164 }
4165
4166 /* Don't let wildcard resumes resume fork children that GDB
4167 does not yet know are new fork children. */
4168 if (lwp->fork_relative != NULL)
4169 {
5a04c4cf
PA
4170 struct lwp_info *rel = lwp->fork_relative;
4171
4172 if (rel->status_pending_p
183be222
SM
4173 && (rel->waitstatus.kind () == TARGET_WAITKIND_FORKED
4174 || rel->waitstatus.kind () == TARGET_WAITKIND_VFORKED))
5a04c4cf 4175 {
c058728c
SM
4176 threads_debug_printf
4177 ("not resuming LWP %ld: has queued stop reply",
4178 lwpid_of (thread));
5a04c4cf
PA
4179 continue;
4180 }
4181 }
4182
4183 /* If the thread has a pending event that has already been
4184 reported to GDBserver core, but GDB has not pulled the
4185 event out of the vStopped queue yet, likewise, ignore the
4186 (wildcard) resume request. */
9c80ecd6 4187 if (in_queued_stop_replies (thread->id))
5a04c4cf 4188 {
c058728c
SM
4189 threads_debug_printf
4190 ("not resuming LWP %ld: has queued stop reply",
4191 lwpid_of (thread));
5a04c4cf
PA
4192 continue;
4193 }
4194
5fdda392 4195 lwp->resume = &resume[ndx];
8336d594 4196 thread->last_resume_kind = lwp->resume->kind;
fa593d66 4197
c2d6af84
PA
4198 lwp->step_range_start = lwp->resume->step_range_start;
4199 lwp->step_range_end = lwp->resume->step_range_end;
4200
fa593d66
PA
4201 /* If we had a deferred signal to report, dequeue one now.
4202 This can happen if LWP gets more than one signal while
4203 trying to get out of a jump pad. */
4204 if (lwp->stopped
4205 && !lwp->status_pending_p
4206 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4207 {
4208 lwp->status_pending_p = 1;
4209
c058728c
SM
4210 threads_debug_printf
4211 ("Dequeueing deferred signal %d for LWP %ld, "
4212 "leaving status pending.",
4213 WSTOPSIG (lwp->status_pending),
4214 lwpid_of (thread));
fa593d66
PA
4215 }
4216
5fdda392 4217 return;
95954743
PA
4218 }
4219 }
2bd7c093
PA
4220
4221 /* No resume action for this thread. */
4222 lwp->resume = NULL;
5544ad89
DJ
4223}
4224
df95181f
TBA
4225bool
4226linux_process_target::resume_status_pending (thread_info *thread)
5544ad89 4227{
d86d4aaf 4228 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4229
bd99dc85
PA
4230 /* LWPs which will not be resumed are not interesting, because
4231 we might not wait for them next time through linux_wait. */
2bd7c093 4232 if (lwp->resume == NULL)
25c28b4d 4233 return false;
64386c31 4234
df95181f 4235 return thread_still_has_status_pending (thread);
d50171e4
PA
4236}
4237
/* Return true if THREAD is stopped at a breakpoint that needs stepping
   over before it can be resumed -- i.e., a breakpoint we inserted that
   GDB does not expect to hit.  Returns false for threads that are not
   stopped, are suspended, have pending status, or whose PC has moved
   since they stopped.  */

bool
linux_process_target::thread_needs_step_over (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
			    lwpid_of (thread));
      return false;
    }

  /* A thread GDB wants kept stopped must not be resumed at all.  */
  if (thread->last_resume_kind == resume_stop)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, should remain stopped",
	 lwpid_of (thread));
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
			    lwpid_of (thread));
      return false;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending status.",
	 lwpid_of (thread));
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Cancelling, PC was changed.  "
	 "Old stop_pc was 0x%s, PC is now 0x%s", lwpid_of (thread),
	 paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (supports_software_single_step ()
      && !lwp->pending_signals.empty ()
      && lwp_signal_can_be_delivered (lwp))
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending signals.",
	 lwpid_of (thread));

      return false;
    }

  /* breakpoint_here etc. operate on the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
				" GDB breakpoint at 0x%s; skipping step over",
				lwpid_of (thread), paddress (pc));

	  return false;
	}
      else
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, "
				"found breakpoint at 0x%s",
				lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  return true;
	}
    }

  threads_debug_printf
    ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
     lwpid_of (thread), paddress (pc));

  return false;
}
4353
/* Begin a step-over operation for LWP: stop (and suspend) all other
   LWPs, remove the breakpoint/jump at LWP's PC, and resume LWP for a
   single step.  Sets step_over_bkpt so the event loop knows to expect
   the next event from this LWP.  */

void
linux_process_target::start_step_over (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  CORE_ADDR pc;

  threads_debug_printf ("Starting step-over on LWP %ld.  Stopping all threads",
			lwpid_of (thread));

  /* Suspend everyone else so nothing runs past the uninserted
     breakpoint.  */
  stop_all_lwps (1, lwp);

  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  threads_debug_printf ("Done stopping all threads for step-over.");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  bool step = false;
  {
    scoped_restore_current_thread restore_thread;
    switch_to_thread (thread);

    /* Record where to reinsert the breakpoint once the step is
       done (see finish_step_over).  */
    lwp->bp_reinsert = pc;
    uninsert_breakpoints_at (pc);
    uninsert_fast_tracepoint_jumps_at (pc);

    step = single_step (lwp);
  }

  resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->id;
}
4397
/* Finish a step-over started by start_step_over: reinsert the
   breakpoint/jump that was removed at LWP->BP_REINSERT, delete any
   software single-step breakpoints, and clear step_over_bkpt.  Return
   true if a step-over was in fact in progress, false otherwise.  */

bool
linux_process_target::finish_step_over (lwp_info *lwp)
{
  if (lwp->bp_reinsert != 0)
    {
      scoped_restore_current_thread restore_thread;

      threads_debug_printf ("Finished step over.");

      switch_to_thread (get_lwp_thread (lwp));

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
	 may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any single-step breakpoints.  No longer needed.  We
	 don't have to worry about other threads hitting this trap,
	 and later not being able to explain it, because we were
	 stepping over a breakpoint, and we hold all threads but
	 LWP stopped while doing that.  */
      if (!supports_hardware_single_step ())
	{
	  gdb_assert (has_single_step_breakpoints (current_thread));
	  delete_single_step_breakpoints (current_thread);
	}

      step_over_bkpt = null_ptid;
      return true;
    }
  else
    return false;
}
4433
/* If a step-over is in progress (step_over_bkpt is set), wait for it
   to complete and finish it, discarding the resulting step SIGTRAP if
   GDB did not ask for a step.  Used before detaching so we don't leave
   a thread mid-step-over.  */

void
linux_process_target::complete_ongoing_step_over ()
{
  if (step_over_bkpt != null_ptid)
    {
      struct lwp_info *lwp;
      int wstat;
      int ret;

      threads_debug_printf ("detach: step over in progress, finish it first");

      /* Passing NULL_PTID as filter indicates we want all events to
	 be left pending.  Eventually this returns when there are no
	 unwaited-for children left.  */
      ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
				     __WALL);
      gdb_assert (ret == -1);

      lwp = find_lwp_pid (step_over_bkpt);
      if (lwp != NULL)
	{
	  finish_step_over (lwp);

	  /* If we got our step SIGTRAP, don't leave it pending,
	     otherwise we would report it to GDB as a spurious
	     SIGTRAP.  */
	  gdb_assert (lwp->status_pending_p);
	  if (WIFSTOPPED (lwp->status_pending)
	      && WSTOPSIG (lwp->status_pending) == SIGTRAP)
	    {
	      thread_info *thread = get_lwp_thread (lwp);
	      if (thread->last_resume_kind != resume_step)
		{
		  threads_debug_printf ("detach: discard step-over SIGTRAP");

		  lwp->status_pending_p = 0;
		  lwp->status_pending = 0;
		  /* Re-resume the LWP the way it was running before
		     the step-over interrupted it.  */
		  resume_one_lwp (lwp, lwp->stepping, 0, NULL);
		}
	      else
		threads_debug_printf
		  ("detach: resume_step, not discarding step-over SIGTRAP");
	    }
	}
      step_over_bkpt = null_ptid;
      unsuspend_all_lwps (lwp);
    }
}
4482
df95181f
TBA
/* Act on THREAD's previously-recorded resume request (lwp->resume):
   either queue a stop (resume_stop), or queue any requested signal
   and let the LWP run, unless LEAVE_ALL_STOPPED or a pending
   status/suspension says it must stay stopped for now.  */

void
linux_process_target::resume_one_thread (thread_info *thread,
					 bool leave_all_stopped)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int leave_pending;

  /* No resume request recorded for this thread.  */
  if (lwp->resume == NULL)
    return;

  if (lwp->resume->kind == resume_stop)
    {
      threads_debug_printf ("resume_stop request for LWP %ld",
			    lwpid_of (thread));

      if (!lwp->stopped)
	{
	  threads_debug_printf ("stopping LWP %ld", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  threads_debug_printf ("already stopped LWP %ld", lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report.empty ())
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.set_ignore ();
      return;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume it - we can just report the pending status.
     Likewise if it is suspended, because e.g., another thread is
     stepping past a breakpoint.  Make sure to queue any signals that
     would otherwise be sent.  In all-stop mode, we do this decision
     based on if *any* thread has a pending status.  If there's a
     thread that needs the step-over-breakpoint dance, then don't
     resume any other thread but that particular one.  */
  leave_pending = (lwp->suspended
		   || lwp->status_pending_p
		   || leave_all_stopped);

  /* If we have a new signal, enqueue the signal.  */
  if (lwp->resume->sig != 0)
    {
      siginfo_t info, *info_p;

      /* If this is the same signal we were previously stopped by,
	 make sure to queue its siginfo.  */
      if (WIFSTOPPED (lwp->last_status)
	  && WSTOPSIG (lwp->last_status) == lwp->resume->sig
	  && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
		     (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
    }

  if (!leave_pending)
    {
      threads_debug_printf ("resuming LWP %ld", lwpid_of (thread));

      proceed_one_lwp (thread, NULL);
    }
  else
    threads_debug_printf ("leaving LWP %ld stopped", lwpid_of (thread));

  thread->last_status.set_ignore ();
  lwp->resume = NULL;
}
4580
0e4d7e35
TBA
/* Implement the resume target op (vCont handling): record each
   thread's resume request, then either start a step-over or let the
   threads run, leaving everything stopped if a pending status or a
   needed step-over says so.  */

void
linux_process_target::resume (thread_resume *resume_info, size_t n)
{
  struct thread_info *need_step_over = NULL;

  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  /* Record the resume request of each thread in its lwp.  */
  for_each_thread ([&] (thread_info *thread)
    {
      linux_set_resume_request (thread, resume_info, n);
    });

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  bool any_pending = false;
  if (!non_stop)
    any_pending = find_thread ([this] (thread_info *thread)
		    {
		      return resume_status_pending (thread);
		    }) != nullptr;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && low_supports_breakpoints ())
    need_step_over = find_thread ([this] (thread_info *thread)
		       {
			 return thread_needs_step_over (thread);
		       });

  bool leave_all_stopped = (need_step_over != NULL || any_pending);

  if (need_step_over != NULL)
    threads_debug_printf ("Not resuming all, need step over");
  else if (any_pending)
    threads_debug_printf ("Not resuming, all-stop and found "
			  "an LWP with pending status");
  else
    threads_debug_printf ("Resuming, no pending status or step over needed");

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  for_each_thread ([&] (thread_info *thread)
    {
      resume_one_thread (thread, leave_all_stopped);
    });

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  /* We may have events that were pending that can/should be sent to
     the client now.  Trigger a linux_wait call.  */
  if (target_is_async_p ())
    async_file_mark ();
}
4643
df95181f
TBA
/* Let the LWP of THREAD continue running, unless it is EXCEPT, is
   already running, has a pending status, is suspended, or the client
   asked it to stay stopped.  Chooses between stepping and continuing
   based on last_resume_kind and any pending breakpoint reinsert.  */

void
linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  if (lwp == except)
    return;

  threads_debug_printf ("lwp %ld", lwpid_of (thread));

  if (!lwp->stopped)
    {
      threads_debug_printf ("   LWP %ld already running", lwpid_of (thread));
      return;
    }

  /* Client asked this thread to stay stopped, and we have already
     reported (or will report) a stop for it.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
    {
      threads_debug_printf ("   client wants LWP to remain %ld stopped",
			    lwpid_of (thread));
      return;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf ("   LWP %ld has pending status, leaving stopped",
			    lwpid_of (thread));
      return;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      threads_debug_printf ("   LWP %ld is suspended", lwpid_of (thread));
      return;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report.empty ()
      && (lwp->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting))
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      threads_debug_printf
	("Client wants LWP %ld to stop.  Making sure it has a SIGSTOP pending",
	 lwpid_of (thread));

      send_sigstop (lwp);
    }

  if (thread->last_resume_kind == resume_step)
    {
      threads_debug_printf ("   stepping LWP %ld, client wants it stepping",
			    lwpid_of (thread));

      /* If resume_step is requested by GDB, install single-step
	 breakpoints when the thread is about to be actually resumed if
	 the single-step breakpoints weren't removed.  */
      if (supports_software_single_step ()
	  && !has_single_step_breakpoints (thread))
	install_software_single_step_breakpoints (lwp);

      step = maybe_hw_step (thread);
    }
  else if (lwp->bp_reinsert != 0)
    {
      /* A breakpoint is temporarily removed for stepping over it;
	 keep stepping until it can be reinserted.  */
      threads_debug_printf ("   stepping LWP %ld, reinsert set",
			    lwpid_of (thread));

      step = maybe_hw_step (thread);
    }
  else
    step = 0;

  resume_one_lwp (lwp, step, 0, NULL);
}
4732
df95181f
TBA
/* Decrement the suspend count of THREAD's LWP, then let it proceed
   (unless it is EXCEPT).  Used when tearing down a step-over that had
   suspended the other threads.  */

void
linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
						     lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  if (lwp == except)
    return;

  lwp_suspended_decr (lwp);

  proceed_one_lwp (thread, except);
}
4746
d16f3f6c
TBA
/* Let all LWPs continue running, unless some thread first needs to
   step over a breakpoint, in which case only that step-over is
   started.  */

void
linux_process_target::proceed_all_lwps ()
{
  struct thread_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (low_supports_breakpoints ())
    {
      need_step_over = find_thread ([this] (thread_info *thread)
			 {
			   return thread_needs_step_over (thread);
			 });

      if (need_step_over != NULL)
	{
	  threads_debug_printf ("found thread %ld needing a step-over",
				lwpid_of (need_step_over));

	  start_step_over (get_thread_lwp (need_step_over));
	  return;
	}
    }

  threads_debug_printf ("Proceeding, no step-over needed");

  for_each_thread ([this] (thread_info *thread)
    {
      proceed_one_lwp (thread, NULL);
    });
}
4781
d16f3f6c
TBA
/* Re-start all LWPs except EXCEPT (which may be NULL).  If UNSUSPEND
   is nonzero, also drop one suspend count from each LWP first.  */

void
linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
{
  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  if (except)
    threads_debug_printf ("except=(LWP %ld)",
			  lwpid_of (get_lwp_thread (except)));
  else
    threads_debug_printf ("except=nullptr");

  if (unsuspend)
    for_each_thread ([&] (thread_info *thread)
      {
	unsuspend_and_proceed_one_lwp (thread, except);
      });
  else
    for_each_thread ([&] (thread_info *thread)
      {
	proceed_one_lwp (thread, except);
      });
}
4804
58caa3dc
DJ
4805
4806#ifdef HAVE_LINUX_REGSETS
4807
1faeff08
MR
4808#define use_linux_regsets 1
4809
030031ee
PA
4810/* Returns true if REGSET has been disabled. */
4811
4812static int
4813regset_disabled (struct regsets_info *info, struct regset_info *regset)
4814{
4815 return (info->disabled_regsets != NULL
4816 && info->disabled_regsets[regset - info->regsets]);
4817}
4818
4819/* Disable REGSET. */
4820
4821static void
4822disable_regset (struct regsets_info *info, struct regset_info *regset)
4823{
4824 int dr_offset;
4825
4826 dr_offset = regset - info->regsets;
4827 if (info->disabled_regsets == NULL)
224c3ddb 4828 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
030031ee
PA
4829 info->disabled_regsets[dr_offset] = 1;
4830}
4831
/* Fetch into REGCACHE all registers covered by REGSETS_INFO, one
   regset ptrace call at a time.  Returns 0 if the general registers
   were among the fetched regsets, 1 otherwise (so the caller knows it
   must still fetch them individually).  */

static int
regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || regset_disabled (regsets_info, regset))
	continue;

      buf = xmalloc (regset->size);

      /* NT_-typed regsets go through PTRACE_GETREGSET, which takes an
	 iovec; legacy regsets pass the buffer directly.  */
      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ENODATA)
	    {
	      /* ENODATA may be returned if the regset is currently
		 not "active".  This can happen in normal operation,
		 so suppress the warning in this case.  */
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to read its registers.  */
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else
	{
	  if (regset->type == GENERAL_REGS)
	    saw_general_regs = 1;
	  regset->store_function (regcache, buf);
	}
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
4910
/* Write back to the inferior all registers covered by REGSETS_INFO
   from REGCACHE, regset by regset.  Each regset is read first and the
   cached registers overlaid on it (read-modify-write), so kernel
   regset members not present in the regcache are preserved.  Returns
   0 if the general registers were among the stored regsets, 1
   otherwise.  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip empty, disabled, or read-only (no fill_function)
	 regsets.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
4999
1faeff08 5000#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5001
1faeff08 5002#define use_linux_regsets 0
3aee8918
PA
5003#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5004#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5005
58caa3dc 5006#endif
1faeff08
MR
5007
5008/* Return 1 if register REGNO is supported by one of the regset ptrace
5009 calls or 0 if it has to be transferred individually. */
5010
5011static int
3aee8918 5012linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5013{
5014 unsigned char mask = 1 << (regno % 8);
5015 size_t index = regno / 8;
5016
5017 return (use_linux_regsets
3aee8918
PA
5018 && (regs_info->regset_bitmap == NULL
5019 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5020}
5021
58caa3dc 5022#ifdef HAVE_LINUX_USRREGS
1faeff08 5023
5b3da067 5024static int
3aee8918 5025register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5026{
5027 int addr;
5028
3aee8918 5029 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5030 error ("Invalid register number %d.", regnum);
5031
3aee8918 5032 addr = usrregs->regmap[regnum];
1faeff08
MR
5033
5034 return addr;
5035}
5036
daca57a7
TBA
5037
/* Fetch register REGNO from the inferior's USER area into REGCACHE,
   one PTRACE_PEEKUSER word at a time.  Silently returns if the
   register is out of range, cannot be fetched, or has no user-area
   offset; on a ptrace error the register is supplied as
   unavailable.  */

void
linux_process_target::fetch_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_fetch_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the transfer size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	{
	  /* Mark register REGNO unavailable.  */
	  supply_register (regcache, regno, NULL);
	  return;
	}
    }

  low_supply_ptrace_register (regcache, regno, buf);
}
5081
daca57a7
TBA
/* Write register REGNO from REGCACHE back to the inferior's USER
   area, one PTRACE_POKEUSER word at a time.  Silently returns if the
   register is out of range, cannot be stored, or has no user-area
   offset.  */

void
linux_process_target::store_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_store_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the transfer size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);
  memset (buf, 0, size);

  low_collect_ptrace_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  if (!low_cannot_store_register (regno))
	    error ("writing register %d: %s", regno, safe_strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
daca57a7 5133#endif /* HAVE_LINUX_USRREGS */
1faeff08 5134
b35db733
TBA
/* Default conversion of register REGNO from REGCACHE into the ptrace
   user-area layout in BUF: a plain collect_register.  */

void
linux_process_target::low_collect_ptrace_register (regcache *regcache,
						   int regno, char *buf)
{
  collect_register (regcache, regno, buf);
}
5141
/* Default conversion of register REGNO from the ptrace user-area
   layout in BUF into REGCACHE: a plain supply_register.  */

void
linux_process_target::low_supply_ptrace_register (regcache *regcache,
						  int regno, const char *buf)
{
  supply_register (regcache, regno, buf);
}
5148
daca57a7
TBA
/* Fetch, via the USER area, either register REGNO or (when REGNO is
   -1) every register not covered by a regset — all registers if ALL
   is set.  A no-op unless HAVE_LINUX_USRREGS.  */

void
linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
						    regcache *regcache,
						    int regno, int all)
{
#ifdef HAVE_LINUX_USRREGS
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
	if (all || !linux_register_in_regsets (regs_info, regno))
	  fetch_register (usr, regcache, regno);
    }
  else
    fetch_register (usr, regcache, regno);
#endif
}
5167
daca57a7
TBA
/* Store, via the USER area, either register REGNO or (when REGNO is
   -1) every register not covered by a regset — all registers if ALL
   is set.  A no-op unless HAVE_LINUX_USRREGS.  */

void
linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
						    regcache *regcache,
						    int regno, int all)
{
#ifdef HAVE_LINUX_USRREGS
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
	if (all || !linux_register_in_regsets (regs_info, regno))
	  store_register (usr, regcache, regno);
    }
  else
    store_register (usr, regcache, regno);
#endif
}
1faeff08 5186
a5a4d4cd
TBA
/* Implement the fetch_registers target op: fetch register REGNO (all
   registers when REGNO is -1) into REGCACHE, trying regsets first and
   falling back to the USER area for whatever they did not cover.  */

void
linux_process_target::fetch_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      /* Give the arch-specific low_fetch_register hook a shot at each
	 register first.  */
      if (regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  low_fetch_register (regcache, regno);

      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      if (low_fetch_register (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5217
a5a4d4cd
TBA
/* Implement the store_registers target op: write register REGNO (all
   registers when REGNO is -1) from REGCACHE to the inferior, trying
   regsets first and falling back to the USER area for whatever they
   did not cover.  */

void
linux_process_target::store_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      all = regsets_store_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5242
bd70b1f2
TBA
/* Default low_fetch_register hook: handles no register specially and
   reports so by returning false.  */

bool
linux_process_target::low_fetch_register (regcache *regcache, int regno)
{
  return false;
}
da6d8c04 5248
/* A wrapper for the read_memory target op.  Returns 0 on success,
   or a nonzero errno value otherwise (see
   linux_process_target::read_memory).  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  return the_target->read_memory (memaddr, myaddr, len);
}
5256
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success, or
   the errno value of the failing ptrace peek otherwise.  Reads via
   /proc/PID/mem when possible, falling back to word-at-a-time
   PTRACE_PEEKTEXT.  */

int
linux_process_target::read_memory (CORE_ADDR memaddr,
				   unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_thread);
  PTRACE_XFER_TYPE *buffer;
  CORE_ADDR addr;
  int count;
  char filename[64];
  int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
	bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
	return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
	{
	  memaddr += bytes;
	  myaddr += bytes;
	  len -= bytes;
	}
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	   / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);

  /* Read all the longwords.  */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
			  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer (a partial read still
     delivers the words successfully peeked before the failure).  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
	      (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	      i < len ? i : len);
    }

  return ret;
}
5345
93ae6fdc
PA
/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
   memory at MEMADDR.  On failure (cannot write to the inferior)
   returns the value of errno.  Always succeeds if LEN is zero.
   Writes whole PTRACE_XFER_TYPE words, so the partial words at either
   end are first read back from the inferior to preserve the
   surrounding bytes.  */

int
linux_process_target::write_memory (CORE_ADDR memaddr,
				    const unsigned char *myaddr, int len)
{
  int i;
  /* Round starting address down to longword boundary.  */
  CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
      / sizeof (PTRACE_XFER_TYPE);

  /* Allocate buffer of that many longwords.  */
  PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);

  int pid = lwpid_of (current_thread);

  if (len == 0)
    {
      /* Zero length write always succeeds.  */
      return 0;
    }

  if (debug_threads)
    {
      /* Dump up to four bytes.  */
      char str[4 * 2 + 1];
      char *p = str;
      int dump = len < 4 ? len : 4;

      for (i = 0; i < dump; i++)
	{
	  sprintf (p, "%02x", myaddr[i]);
	  p += 2;
	}
      *p = '\0';

      threads_debug_printf ("Writing %s to 0x%08lx in process %d",
			    str, (long) memaddr, pid);
    }

  /* Fill start and end extra bytes of buffer with existing memory data.  */

  errno = 0;
  /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
     about coercing an 8 byte integer to a 4 byte pointer.  */
  buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
		      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
		      (PTRACE_TYPE_ARG4) 0);
  if (errno)
    return errno;

  if (count > 1)
    {
      errno = 0;
      buffer[count - 1]
	= ptrace (PTRACE_PEEKTEXT, pid,
		  /* Coerce to a uintptr_t first to avoid potential gcc warning
		     about coercing an 8 byte integer to a 4 byte pointer.  */
		  (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
						  * sizeof (PTRACE_XFER_TYPE)),
		  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	return errno;
    }

  /* Copy data to be written over corresponding part of buffer.  */

  memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  myaddr, len);

  /* Write the entire buffer.  */

  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKETEXT, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
	      (PTRACE_TYPE_ARG4) buffer[i]);
      if (errno)
	return errno;
    }

  return 0;
}
2f2893d9 5437
2a31c7aa
TBA
/* Implement the look_up_symbols target op: initialize libthread_db
   support for the current process, once, when thread_db support is
   compiled in.  */

void
linux_process_target::look_up_symbols ()
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process.  */
  if (proc->priv->thread_db != NULL)
    return;

  thread_db_init ();
#endif
}
5450
eb497a2a
TBA
/* Implement the request_interrupt target op.  */

void
linux_process_target::request_interrupt ()
{
  /* Send a SIGINT to the process group.  This acts just like the user
     typed a ^C on the controlling terminal.  */
  ::kill (-signal_pid, SIGINT);
}
5458
eac215cc
TBA
/* The Linux target can always read the auxiliary vector (see
   read_auxv, which goes through /proc/PID/auxv).  */

bool
linux_process_target::supports_read_auxv ()
{
  return true;
}
5464
aa691b87
RM
/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
   to debugger memory starting at MYADDR.  Returns the number of bytes
   read, or -1 if /proc/PID/auxv cannot be opened or seeked.  */

int
linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
				 unsigned int len)
{
  char filename[PATH_MAX];
  int fd, n;
  int pid = lwpid_of (current_thread);

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;

  if (offset != (CORE_ADDR) 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    n = -1;
  else
    n = read (fd, myaddr, len);

  close (fd);

  return n;
}
5492
7e0bde70
TBA
5493int
5494linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5495 int size, raw_breakpoint *bp)
e013ee27 5496{
c8f4bfdd
YQ
5497 if (type == raw_bkpt_type_sw)
5498 return insert_memory_breakpoint (bp);
e013ee27 5499 else
9db9aa23
TBA
5500 return low_insert_point (type, addr, size, bp);
5501}
5502
5503int
5504linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5505 int size, raw_breakpoint *bp)
5506{
5507 /* Unsupported (see target.h). */
5508 return 1;
e013ee27
OF
5509}
5510
7e0bde70
TBA
5511int
5512linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5513 int size, raw_breakpoint *bp)
e013ee27 5514{
c8f4bfdd
YQ
5515 if (type == raw_bkpt_type_sw)
5516 return remove_memory_breakpoint (bp);
e013ee27 5517 else
9db9aa23
TBA
5518 return low_remove_point (type, addr, size, bp);
5519}
5520
5521int
5522linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5523 int size, raw_breakpoint *bp)
5524{
5525 /* Unsupported (see target.h). */
5526 return 1;
e013ee27
OF
5527}
5528
84320c4e 5529/* Implement the stopped_by_sw_breakpoint target_ops
3e572f71
PA
5530 method. */
5531
84320c4e
TBA
5532bool
5533linux_process_target::stopped_by_sw_breakpoint ()
3e572f71
PA
5534{
5535 struct lwp_info *lwp = get_thread_lwp (current_thread);
5536
5537 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5538}
5539
84320c4e 5540/* Implement the supports_stopped_by_sw_breakpoint target_ops
3e572f71
PA
5541 method. */
5542
84320c4e
TBA
5543bool
5544linux_process_target::supports_stopped_by_sw_breakpoint ()
3e572f71
PA
5545{
5546 return USE_SIGTRAP_SIGINFO;
5547}
5548
93fe88b2 5549/* Implement the stopped_by_hw_breakpoint target_ops
3e572f71
PA
5550 method. */
5551
93fe88b2
TBA
5552bool
5553linux_process_target::stopped_by_hw_breakpoint ()
3e572f71
PA
5554{
5555 struct lwp_info *lwp = get_thread_lwp (current_thread);
5556
5557 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5558}
5559
93fe88b2 5560/* Implement the supports_stopped_by_hw_breakpoint target_ops
3e572f71
PA
5561 method. */
5562
93fe88b2
TBA
5563bool
5564linux_process_target::supports_stopped_by_hw_breakpoint ()
3e572f71
PA
5565{
5566 return USE_SIGTRAP_SIGINFO;
5567}
5568
70b90b91 5569/* Implement the supports_hardware_single_step target_ops method. */
45614f15 5570
22aa6223
TBA
5571bool
5572linux_process_target::supports_hardware_single_step ()
45614f15 5573{
b31cdfa6 5574 return true;
45614f15
YQ
5575}
5576
6eeb5c55
TBA
5577bool
5578linux_process_target::stopped_by_watchpoint ()
e013ee27 5579{
0bfdf32f 5580 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c 5581
15c66dd6 5582 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
e013ee27
OF
5583}
5584
6eeb5c55
TBA
5585CORE_ADDR
5586linux_process_target::stopped_data_address ()
e013ee27 5587{
0bfdf32f 5588 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
5589
5590 return lwp->stopped_data_address;
e013ee27
OF
5591}
5592
db0dfaa0
LM
5593/* This is only used for targets that define PT_TEXT_ADDR,
5594 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5595 the target has different ways of acquiring this information, like
5596 loadmaps. */
52fb6437 5597
5203ae1e
TBA
5598bool
5599linux_process_target::supports_read_offsets ()
5600{
5601#ifdef SUPPORTS_READ_OFFSETS
5602 return true;
5603#else
5604 return false;
5605#endif
5606}
5607
52fb6437
NS
5608/* Under uClinux, programs are loaded at non-zero offsets, which we need
5609 to tell gdb about. */
5610
5203ae1e
TBA
5611int
5612linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
52fb6437 5613{
5203ae1e 5614#ifdef SUPPORTS_READ_OFFSETS
52fb6437 5615 unsigned long text, text_end, data;
62828379 5616 int pid = lwpid_of (current_thread);
52fb6437
NS
5617
5618 errno = 0;
5619
b8e1b30e
LM
5620 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5621 (PTRACE_TYPE_ARG4) 0);
5622 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5623 (PTRACE_TYPE_ARG4) 0);
5624 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5625 (PTRACE_TYPE_ARG4) 0);
52fb6437
NS
5626
5627 if (errno == 0)
5628 {
5629 /* Both text and data offsets produced at compile-time (and so
1b3f6016
PA
5630 used by gdb) are relative to the beginning of the program,
5631 with the data segment immediately following the text segment.
5632 However, the actual runtime layout in memory may put the data
5633 somewhere else, so when we send gdb a data base-address, we
5634 use the real data base address and subtract the compile-time
5635 data base-address from it (which is just the length of the
5636 text segment). BSS immediately follows data in both
5637 cases. */
52fb6437
NS
5638 *text_p = text;
5639 *data_p = data - (text_end - text);
1b3f6016 5640
52fb6437
NS
5641 return 1;
5642 }
5203ae1e
TBA
5643 return 0;
5644#else
5645 gdb_assert_not_reached ("target op read_offsets not supported");
52fb6437 5646#endif
5203ae1e 5647}
52fb6437 5648
6e3fd7e9
TBA
5649bool
5650linux_process_target::supports_get_tls_address ()
5651{
5652#ifdef USE_THREAD_DB
5653 return true;
5654#else
5655 return false;
5656#endif
5657}
5658
5659int
5660linux_process_target::get_tls_address (thread_info *thread,
5661 CORE_ADDR offset,
5662 CORE_ADDR load_module,
5663 CORE_ADDR *address)
5664{
5665#ifdef USE_THREAD_DB
5666 return thread_db_get_tls_address (thread, offset, load_module, address);
5667#else
5668 return -1;
5669#endif
5670}
5671
2d0795ee
TBA
5672bool
5673linux_process_target::supports_qxfer_osdata ()
5674{
5675 return true;
5676}
5677
5678int
5679linux_process_target::qxfer_osdata (const char *annex,
5680 unsigned char *readbuf,
5681 unsigned const char *writebuf,
5682 CORE_ADDR offset, int len)
07e059b5 5683{
d26e3629 5684 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
5685}
5686
cb63de7c
TBA
5687void
5688linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5689 gdb_byte *inf_siginfo, int direction)
d0722149 5690{
cb63de7c 5691 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
d0722149
DE
5692
5693 /* If there was no callback, or the callback didn't do anything,
5694 then just do a straight memcpy. */
5695 if (!done)
5696 {
5697 if (direction == 1)
a5362b9a 5698 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 5699 else
a5362b9a 5700 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
5701 }
5702}
5703
cb63de7c
TBA
5704bool
5705linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
5706 int direction)
5707{
5708 return false;
5709}
5710
d7abedf7
TBA
5711bool
5712linux_process_target::supports_qxfer_siginfo ()
5713{
5714 return true;
5715}
5716
5717int
5718linux_process_target::qxfer_siginfo (const char *annex,
5719 unsigned char *readbuf,
5720 unsigned const char *writebuf,
5721 CORE_ADDR offset, int len)
4aa995e1 5722{
d0722149 5723 int pid;
a5362b9a 5724 siginfo_t siginfo;
8adce034 5725 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1 5726
0bfdf32f 5727 if (current_thread == NULL)
4aa995e1
PA
5728 return -1;
5729
0bfdf32f 5730 pid = lwpid_of (current_thread);
4aa995e1 5731
c058728c
SM
5732 threads_debug_printf ("%s siginfo for lwp %d.",
5733 readbuf != NULL ? "Reading" : "Writing",
5734 pid);
4aa995e1 5735
0adea5f7 5736 if (offset >= sizeof (siginfo))
4aa995e1
PA
5737 return -1;
5738
b8e1b30e 5739 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
5740 return -1;
5741
d0722149
DE
5742 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5743 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5744 inferior with a 64-bit GDBSERVER should look the same as debugging it
5745 with a 32-bit GDBSERVER, we need to convert it. */
5746 siginfo_fixup (&siginfo, inf_siginfo, 0);
5747
4aa995e1
PA
5748 if (offset + len > sizeof (siginfo))
5749 len = sizeof (siginfo) - offset;
5750
5751 if (readbuf != NULL)
d0722149 5752 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
5753 else
5754 {
d0722149
DE
5755 memcpy (inf_siginfo + offset, writebuf, len);
5756
5757 /* Convert back to ptrace layout before flushing it out. */
5758 siginfo_fixup (&siginfo, inf_siginfo, 1);
5759
b8e1b30e 5760 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
5761 return -1;
5762 }
5763
5764 return len;
5765}
5766
bd99dc85
PA
5767/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5768 so we notice when children change state; as the handler for the
5769 sigsuspend in my_waitpid. */
5770
5771static void
5772sigchld_handler (int signo)
5773{
5774 int old_errno = errno;
5775
5776 if (debug_threads)
e581f2b4
PA
5777 {
5778 do
5779 {
a7e559cc
AH
5780 /* Use the async signal safe debug function. */
5781 if (debug_write ("sigchld_handler\n",
5782 sizeof ("sigchld_handler\n") - 1) < 0)
e581f2b4
PA
5783 break; /* just ignore */
5784 } while (0);
5785 }
bd99dc85
PA
5786
5787 if (target_is_async_p ())
5788 async_file_mark (); /* trigger a linux_wait */
5789
5790 errno = old_errno;
5791}
5792
0dc587d4
TBA
5793bool
5794linux_process_target::supports_non_stop ()
bd99dc85 5795{
0dc587d4 5796 return true;
bd99dc85
PA
5797}
5798
0dc587d4
TBA
5799bool
5800linux_process_target::async (bool enable)
bd99dc85 5801{
0dc587d4 5802 bool previous = target_is_async_p ();
bd99dc85 5803
c058728c
SM
5804 threads_debug_printf ("async (%d), previous=%d",
5805 enable, previous);
8336d594 5806
bd99dc85
PA
5807 if (previous != enable)
5808 {
5809 sigset_t mask;
5810 sigemptyset (&mask);
5811 sigaddset (&mask, SIGCHLD);
5812
21987b9c 5813 gdb_sigmask (SIG_BLOCK, &mask, NULL);
bd99dc85
PA
5814
5815 if (enable)
5816 {
8674f082 5817 if (!linux_event_pipe.open_pipe ())
aa96c426 5818 {
21987b9c 5819 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
aa96c426
GB
5820
5821 warning ("creating event pipe failed.");
5822 return previous;
5823 }
bd99dc85 5824
bd99dc85 5825 /* Register the event loop handler. */
cdc8e9b2 5826 add_file_handler (linux_event_pipe.event_fd (),
2554f6f5
SM
5827 handle_target_event, NULL,
5828 "linux-low");
bd99dc85
PA
5829
5830 /* Always trigger a linux_wait. */
5831 async_file_mark ();
5832 }
5833 else
5834 {
cdc8e9b2 5835 delete_file_handler (linux_event_pipe.event_fd ());
bd99dc85 5836
8674f082 5837 linux_event_pipe.close_pipe ();
bd99dc85
PA
5838 }
5839
21987b9c 5840 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
bd99dc85
PA
5841 }
5842
5843 return previous;
5844}
5845
0dc587d4
TBA
5846int
5847linux_process_target::start_non_stop (bool nonstop)
bd99dc85
PA
5848{
5849 /* Register or unregister from event-loop accordingly. */
0dc587d4 5850 target_async (nonstop);
aa96c426 5851
0dc587d4 5852 if (target_is_async_p () != (nonstop != false))
aa96c426
GB
5853 return -1;
5854
bd99dc85
PA
5855 return 0;
5856}
5857
652aef77
TBA
5858bool
5859linux_process_target::supports_multi_process ()
cf8fd78b 5860{
652aef77 5861 return true;
cf8fd78b
PA
5862}
5863
89245bc0
DB
5864/* Check if fork events are supported. */
5865
9690a72a
TBA
5866bool
5867linux_process_target::supports_fork_events ()
89245bc0 5868{
a2885186 5869 return true;
89245bc0
DB
5870}
5871
5872/* Check if vfork events are supported. */
5873
9690a72a
TBA
5874bool
5875linux_process_target::supports_vfork_events ()
89245bc0 5876{
a2885186 5877 return true;
89245bc0
DB
5878}
5879
94585166
DB
5880/* Check if exec events are supported. */
5881
9690a72a
TBA
5882bool
5883linux_process_target::supports_exec_events ()
94585166 5884{
a2885186 5885 return true;
94585166
DB
5886}
5887
de0d863e
DB
5888/* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5889 ptrace flags for all inferiors. This is in case the new GDB connection
5890 doesn't support the same set of events that the previous one did. */
5891
fb00dfce
TBA
5892void
5893linux_process_target::handle_new_gdb_connection ()
de0d863e 5894{
de0d863e 5895 /* Request that all the lwps reset their ptrace options. */
bbf550d5
SM
5896 for_each_thread ([] (thread_info *thread)
5897 {
5898 struct lwp_info *lwp = get_thread_lwp (thread);
5899
5900 if (!lwp->stopped)
5901 {
5902 /* Stop the lwp so we can modify its ptrace options. */
5903 lwp->must_set_ptrace_flags = 1;
5904 linux_stop_lwp (lwp);
5905 }
5906 else
5907 {
5908 /* Already stopped; go ahead and set the ptrace options. */
5909 struct process_info *proc = find_process_pid (pid_of (thread));
5910 int options = linux_low_ptrace_options (proc->attached);
5911
5912 linux_enable_event_reporting (lwpid_of (thread), options);
5913 lwp->must_set_ptrace_flags = 0;
5914 }
5915 });
de0d863e
DB
5916}
5917
55cf3021
TBA
5918int
5919linux_process_target::handle_monitor_command (char *mon)
5920{
5921#ifdef USE_THREAD_DB
5922 return thread_db_handle_monitor_command (mon);
5923#else
5924 return 0;
5925#endif
5926}
5927
95a45fc1
TBA
5928int
5929linux_process_target::core_of_thread (ptid_t ptid)
5930{
5931 return linux_common_core_of_thread (ptid);
5932}
5933
c756403b
TBA
5934bool
5935linux_process_target::supports_disable_randomization ()
03583c20 5936{
c756403b 5937 return true;
03583c20 5938}
efcbbd14 5939
c0245cb9
TBA
5940bool
5941linux_process_target::supports_agent ()
d1feda86 5942{
c0245cb9 5943 return true;
d1feda86
YQ
5944}
5945
2526e0cd
TBA
5946bool
5947linux_process_target::supports_range_stepping ()
c2d6af84 5948{
7582c77c 5949 if (supports_software_single_step ())
2526e0cd 5950 return true;
c2d6af84 5951
9cfd8715
TBA
5952 return low_supports_range_stepping ();
5953}
5954
5955bool
5956linux_process_target::low_supports_range_stepping ()
5957{
5958 return false;
c2d6af84
PA
5959}
5960
8247b823
TBA
5961bool
5962linux_process_target::supports_pid_to_exec_file ()
5963{
5964 return true;
5965}
5966
04977957 5967const char *
8247b823
TBA
5968linux_process_target::pid_to_exec_file (int pid)
5969{
5970 return linux_proc_pid_to_exec_file (pid);
5971}
5972
c9b7b804
TBA
5973bool
5974linux_process_target::supports_multifs ()
5975{
5976 return true;
5977}
5978
5979int
5980linux_process_target::multifs_open (int pid, const char *filename,
5981 int flags, mode_t mode)
5982{
5983 return linux_mntns_open_cloexec (pid, filename, flags, mode);
5984}
5985
5986int
5987linux_process_target::multifs_unlink (int pid, const char *filename)
5988{
5989 return linux_mntns_unlink (pid, filename);
5990}
5991
5992ssize_t
5993linux_process_target::multifs_readlink (int pid, const char *filename,
5994 char *buf, size_t bufsiz)
5995{
5996 return linux_mntns_readlink (pid, filename, buf, bufsiz);
5997}
5998
723b724b 5999#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
78d85199
YQ
6000struct target_loadseg
6001{
6002 /* Core address to which the segment is mapped. */
6003 Elf32_Addr addr;
6004 /* VMA recorded in the program header. */
6005 Elf32_Addr p_vaddr;
6006 /* Size of this segment in memory. */
6007 Elf32_Word p_memsz;
6008};
6009
723b724b 6010# if defined PT_GETDSBT
78d85199
YQ
6011struct target_loadmap
6012{
6013 /* Protocol version number, must be zero. */
6014 Elf32_Word version;
6015 /* Pointer to the DSBT table, its size, and the DSBT index. */
6016 unsigned *dsbt_table;
6017 unsigned dsbt_size, dsbt_index;
6018 /* Number of segments in this map. */
6019 Elf32_Word nsegs;
6020 /* The actual memory map. */
6021 struct target_loadseg segs[/*nsegs*/];
6022};
723b724b
MF
6023# define LINUX_LOADMAP PT_GETDSBT
6024# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6025# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6026# else
6027struct target_loadmap
6028{
6029 /* Protocol version number, must be zero. */
6030 Elf32_Half version;
6031 /* Number of segments in this map. */
6032 Elf32_Half nsegs;
6033 /* The actual memory map. */
6034 struct target_loadseg segs[/*nsegs*/];
6035};
6036# define LINUX_LOADMAP PTRACE_GETFDPIC
6037# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6038# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6039# endif
78d85199 6040
9da41fda
TBA
6041bool
6042linux_process_target::supports_read_loadmap ()
6043{
6044 return true;
6045}
6046
6047int
6048linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6049 unsigned char *myaddr, unsigned int len)
78d85199 6050{
0bfdf32f 6051 int pid = lwpid_of (current_thread);
78d85199
YQ
6052 int addr = -1;
6053 struct target_loadmap *data = NULL;
6054 unsigned int actual_length, copy_length;
6055
6056 if (strcmp (annex, "exec") == 0)
723b724b 6057 addr = (int) LINUX_LOADMAP_EXEC;
78d85199 6058 else if (strcmp (annex, "interp") == 0)
723b724b 6059 addr = (int) LINUX_LOADMAP_INTERP;
78d85199
YQ
6060 else
6061 return -1;
6062
723b724b 6063 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
78d85199
YQ
6064 return -1;
6065
6066 if (data == NULL)
6067 return -1;
6068
6069 actual_length = sizeof (struct target_loadmap)
6070 + sizeof (struct target_loadseg) * data->nsegs;
6071
6072 if (offset < 0 || offset > actual_length)
6073 return -1;
6074
6075 copy_length = actual_length - offset < len ? actual_length - offset : len;
6076 memcpy (myaddr, (char *) data + offset, copy_length);
6077 return copy_length;
6078}
723b724b 6079#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6080
bc8d3ae4
TBA
6081bool
6082linux_process_target::supports_catch_syscall ()
82075af2 6083{
a2885186 6084 return low_supports_catch_syscall ();
82075af2
JS
6085}
6086
9eedd27d
TBA
6087bool
6088linux_process_target::low_supports_catch_syscall ()
6089{
6090 return false;
6091}
6092
770d8f6a
TBA
6093CORE_ADDR
6094linux_process_target::read_pc (regcache *regcache)
219f2f23 6095{
bf9ae9d8 6096 if (!low_supports_breakpoints ())
219f2f23
PA
6097 return 0;
6098
bf9ae9d8 6099 return low_get_pc (regcache);
219f2f23
PA
6100}
6101
770d8f6a
TBA
6102void
6103linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
219f2f23 6104{
bf9ae9d8 6105 gdb_assert (low_supports_breakpoints ());
219f2f23 6106
bf9ae9d8 6107 low_set_pc (regcache, pc);
219f2f23
PA
6108}
6109
68119632
TBA
6110bool
6111linux_process_target::supports_thread_stopped ()
6112{
6113 return true;
6114}
6115
6116bool
6117linux_process_target::thread_stopped (thread_info *thread)
8336d594
PA
6118{
6119 return get_thread_lwp (thread)->stopped;
6120}
6121
6122/* This exposes stop-all-threads functionality to other modules. */
6123
29e8dc09
TBA
6124void
6125linux_process_target::pause_all (bool freeze)
8336d594 6126{
7984d532
PA
6127 stop_all_lwps (freeze, NULL);
6128}
6129
6130/* This exposes unstop-all-threads functionality to other gdbserver
6131 modules. */
6132
29e8dc09
TBA
6133void
6134linux_process_target::unpause_all (bool unfreeze)
7984d532
PA
6135{
6136 unstop_all_lwps (unfreeze, NULL);
8336d594
PA
6137}
6138
79b44087
TBA
6139int
6140linux_process_target::prepare_to_access_memory ()
90d74c30
PA
6141{
6142 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6143 running LWP. */
6144 if (non_stop)
29e8dc09 6145 target_pause_all (true);
90d74c30
PA
6146 return 0;
6147}
6148
79b44087
TBA
6149void
6150linux_process_target::done_accessing_memory ()
90d74c30
PA
6151{
6152 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6153 running LWP. */
6154 if (non_stop)
29e8dc09 6155 target_unpause_all (true);
90d74c30
PA
6156}
6157
2268b414
JK
6158/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6159
6160static int
6161get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6162 CORE_ADDR *phdr_memaddr, int *num_phdr)
6163{
6164 char filename[PATH_MAX];
6165 int fd;
6166 const int auxv_size = is_elf64
6167 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6168 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6169
6170 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6171
6172 fd = open (filename, O_RDONLY);
6173 if (fd < 0)
6174 return 1;
6175
6176 *phdr_memaddr = 0;
6177 *num_phdr = 0;
6178 while (read (fd, buf, auxv_size) == auxv_size
6179 && (*phdr_memaddr == 0 || *num_phdr == 0))
6180 {
6181 if (is_elf64)
6182 {
6183 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6184
6185 switch (aux->a_type)
6186 {
6187 case AT_PHDR:
6188 *phdr_memaddr = aux->a_un.a_val;
6189 break;
6190 case AT_PHNUM:
6191 *num_phdr = aux->a_un.a_val;
6192 break;
6193 }
6194 }
6195 else
6196 {
6197 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6198
6199 switch (aux->a_type)
6200 {
6201 case AT_PHDR:
6202 *phdr_memaddr = aux->a_un.a_val;
6203 break;
6204 case AT_PHNUM:
6205 *num_phdr = aux->a_un.a_val;
6206 break;
6207 }
6208 }
6209 }
6210
6211 close (fd);
6212
6213 if (*phdr_memaddr == 0 || *num_phdr == 0)
6214 {
6215 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6216 "phdr_memaddr = %ld, phdr_num = %d",
6217 (long) *phdr_memaddr, *num_phdr);
6218 return 2;
6219 }
6220
6221 return 0;
6222}
6223
6224/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6225
6226static CORE_ADDR
6227get_dynamic (const int pid, const int is_elf64)
6228{
6229 CORE_ADDR phdr_memaddr, relocation;
db1ff28b 6230 int num_phdr, i;
2268b414 6231 unsigned char *phdr_buf;
db1ff28b 6232 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
2268b414
JK
6233
6234 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6235 return 0;
6236
6237 gdb_assert (num_phdr < 100); /* Basic sanity check. */
224c3ddb 6238 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
2268b414
JK
6239
6240 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6241 return 0;
6242
6243 /* Compute relocation: it is expected to be 0 for "regular" executables,
6244 non-zero for PIE ones. */
6245 relocation = -1;
db1ff28b
JK
6246 for (i = 0; relocation == -1 && i < num_phdr; i++)
6247 if (is_elf64)
6248 {
6249 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6250
6251 if (p->p_type == PT_PHDR)
6252 relocation = phdr_memaddr - p->p_vaddr;
6253 }
6254 else
6255 {
6256 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6257
6258 if (p->p_type == PT_PHDR)
6259 relocation = phdr_memaddr - p->p_vaddr;
6260 }
6261
2268b414
JK
6262 if (relocation == -1)
6263 {
e237a7e2
JK
6264 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6265 any real world executables, including PIE executables, have always
6266 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6267 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6268 or present DT_DEBUG anyway (fpc binaries are statically linked).
6269
6270 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6271
6272 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6273
2268b414
JK
6274 return 0;
6275 }
6276
db1ff28b
JK
6277 for (i = 0; i < num_phdr; i++)
6278 {
6279 if (is_elf64)
6280 {
6281 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6282
6283 if (p->p_type == PT_DYNAMIC)
6284 return p->p_vaddr + relocation;
6285 }
6286 else
6287 {
6288 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
2268b414 6289
db1ff28b
JK
6290 if (p->p_type == PT_DYNAMIC)
6291 return p->p_vaddr + relocation;
6292 }
6293 }
2268b414
JK
6294
6295 return 0;
6296}
6297
6298/* Return &_r_debug in the inferior, or -1 if not present. Return value
367ba2c2
MR
6299 can be 0 if the inferior does not yet have the library list initialized.
6300 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6301 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
2268b414
JK
6302
6303static CORE_ADDR
6304get_r_debug (const int pid, const int is_elf64)
6305{
6306 CORE_ADDR dynamic_memaddr;
6307 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6308 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
367ba2c2 6309 CORE_ADDR map = -1;
2268b414
JK
6310
6311 dynamic_memaddr = get_dynamic (pid, is_elf64);
6312 if (dynamic_memaddr == 0)
367ba2c2 6313 return map;
2268b414
JK
6314
6315 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6316 {
6317 if (is_elf64)
6318 {
6319 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
a738da3a 6320#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
367ba2c2
MR
6321 union
6322 {
6323 Elf64_Xword map;
6324 unsigned char buf[sizeof (Elf64_Xword)];
6325 }
6326 rld_map;
a738da3a
MF
6327#endif
6328#ifdef DT_MIPS_RLD_MAP
367ba2c2
MR
6329 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6330 {
6331 if (linux_read_memory (dyn->d_un.d_val,
6332 rld_map.buf, sizeof (rld_map.buf)) == 0)
6333 return rld_map.map;
6334 else
6335 break;
6336 }
75f62ce7 6337#endif /* DT_MIPS_RLD_MAP */
a738da3a
MF
6338#ifdef DT_MIPS_RLD_MAP_REL
6339 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6340 {
6341 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6342 rld_map.buf, sizeof (rld_map.buf)) == 0)
6343 return rld_map.map;
6344 else
6345 break;
6346 }
6347#endif /* DT_MIPS_RLD_MAP_REL */
2268b414 6348
367ba2c2
MR
6349 if (dyn->d_tag == DT_DEBUG && map == -1)
6350 map = dyn->d_un.d_val;
2268b414
JK
6351
6352 if (dyn->d_tag == DT_NULL)
6353 break;
6354 }
6355 else
6356 {
6357 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
a738da3a 6358#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
367ba2c2
MR
6359 union
6360 {
6361 Elf32_Word map;
6362 unsigned char buf[sizeof (Elf32_Word)];
6363 }
6364 rld_map;
a738da3a
MF
6365#endif
6366#ifdef DT_MIPS_RLD_MAP
367ba2c2
MR
6367 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6368 {
6369 if (linux_read_memory (dyn->d_un.d_val,
6370 rld_map.buf, sizeof (rld_map.buf)) == 0)
6371 return rld_map.map;
6372 else
6373 break;
6374 }
75f62ce7 6375#endif /* DT_MIPS_RLD_MAP */
a738da3a
MF
6376#ifdef DT_MIPS_RLD_MAP_REL
6377 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6378 {
6379 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6380 rld_map.buf, sizeof (rld_map.buf)) == 0)
6381 return rld_map.map;
6382 else
6383 break;
6384 }
6385#endif /* DT_MIPS_RLD_MAP_REL */
2268b414 6386
367ba2c2
MR
6387 if (dyn->d_tag == DT_DEBUG && map == -1)
6388 map = dyn->d_un.d_val;
2268b414
JK
6389
6390 if (dyn->d_tag == DT_NULL)
6391 break;
6392 }
6393
6394 dynamic_memaddr += dyn_size;
6395 }
6396
367ba2c2 6397 return map;
2268b414
JK
6398}
6399
6400/* Read one pointer from MEMADDR in the inferior. */
6401
6402static int
6403read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6404{
485f1ee4
PA
6405 int ret;
6406
6407 /* Go through a union so this works on either big or little endian
6408 hosts, when the inferior's pointer size is smaller than the size
6409 of CORE_ADDR. It is assumed the inferior's endianness is the
6410 same of the superior's. */
6411 union
6412 {
6413 CORE_ADDR core_addr;
6414 unsigned int ui;
6415 unsigned char uc;
6416 } addr;
6417
6418 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6419 if (ret == 0)
6420 {
6421 if (ptr_size == sizeof (CORE_ADDR))
6422 *ptr = addr.core_addr;
6423 else if (ptr_size == sizeof (unsigned int))
6424 *ptr = addr.ui;
6425 else
6426 gdb_assert_not_reached ("unhandled pointer size");
6427 }
6428 return ret;
2268b414
JK
6429}
6430
974387bb
TBA
6431bool
6432linux_process_target::supports_qxfer_libraries_svr4 ()
6433{
6434 return true;
6435}
6436
2268b414
JK
6437struct link_map_offsets
6438 {
6439 /* Offset and size of r_debug.r_version. */
6440 int r_version_offset;
6441
6442 /* Offset and size of r_debug.r_map. */
6443 int r_map_offset;
6444
6445 /* Offset to l_addr field in struct link_map. */
6446 int l_addr_offset;
6447
6448 /* Offset to l_name field in struct link_map. */
6449 int l_name_offset;
6450
6451 /* Offset to l_ld field in struct link_map. */
6452 int l_ld_offset;
6453
6454 /* Offset to l_next field in struct link_map. */
6455 int l_next_offset;
6456
6457 /* Offset to l_prev field in struct link_map. */
6458 int l_prev_offset;
6459 };
6460
fb723180 6461/* Construct qXfer:libraries-svr4:read reply. */
2268b414 6462
974387bb
TBA
6463int
6464linux_process_target::qxfer_libraries_svr4 (const char *annex,
6465 unsigned char *readbuf,
6466 unsigned const char *writebuf,
6467 CORE_ADDR offset, int len)
2268b414 6468{
fe978cb0 6469 struct process_info_private *const priv = current_process ()->priv;
2268b414
JK
6470 char filename[PATH_MAX];
6471 int pid, is_elf64;
6472
6473 static const struct link_map_offsets lmo_32bit_offsets =
6474 {
6475 0, /* r_version offset. */
6476 4, /* r_debug.r_map offset. */
6477 0, /* l_addr offset in link_map. */
6478 4, /* l_name offset in link_map. */
6479 8, /* l_ld offset in link_map. */
6480 12, /* l_next offset in link_map. */
6481 16 /* l_prev offset in link_map. */
6482 };
6483
6484 static const struct link_map_offsets lmo_64bit_offsets =
6485 {
6486 0, /* r_version offset. */
6487 8, /* r_debug.r_map offset. */
6488 0, /* l_addr offset in link_map. */
6489 8, /* l_name offset in link_map. */
6490 16, /* l_ld offset in link_map. */
6491 24, /* l_next offset in link_map. */
6492 32 /* l_prev offset in link_map. */
6493 };
6494 const struct link_map_offsets *lmo;
214d508e 6495 unsigned int machine;
b1fbec62
GB
6496 int ptr_size;
6497 CORE_ADDR lm_addr = 0, lm_prev = 0;
b1fbec62
GB
6498 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6499 int header_done = 0;
2268b414
JK
6500
6501 if (writebuf != NULL)
6502 return -2;
6503 if (readbuf == NULL)
6504 return -1;
6505
0bfdf32f 6506 pid = lwpid_of (current_thread);
2268b414 6507 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
214d508e 6508 is_elf64 = elf_64_file_p (filename, &machine);
2268b414 6509 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
b1fbec62 6510 ptr_size = is_elf64 ? 8 : 4;
2268b414 6511
b1fbec62
GB
6512 while (annex[0] != '\0')
6513 {
6514 const char *sep;
6515 CORE_ADDR *addrp;
da4ae14a 6516 int name_len;
2268b414 6517
b1fbec62
GB
6518 sep = strchr (annex, '=');
6519 if (sep == NULL)
6520 break;
0c5bf5a9 6521
da4ae14a
TT
6522 name_len = sep - annex;
6523 if (name_len == 5 && startswith (annex, "start"))
b1fbec62 6524 addrp = &lm_addr;
da4ae14a 6525 else if (name_len == 4 && startswith (annex, "prev"))
b1fbec62
GB
6526 addrp = &lm_prev;
6527 else
6528 {
6529 annex = strchr (sep, ';');
6530 if (annex == NULL)
6531 break;
6532 annex++;
6533 continue;
6534 }
6535
6536 annex = decode_address_to_semicolon (addrp, sep + 1);
2268b414 6537 }
b1fbec62
GB
6538
6539 if (lm_addr == 0)
2268b414 6540 {
b1fbec62
GB
6541 int r_version = 0;
6542
6543 if (priv->r_debug == 0)
6544 priv->r_debug = get_r_debug (pid, is_elf64);
6545
6546 /* We failed to find DT_DEBUG. Such situation will not change
6547 for this inferior - do not retry it. Report it to GDB as
6548 E01, see for the reasons at the GDB solib-svr4.c side. */
6549 if (priv->r_debug == (CORE_ADDR) -1)
6550 return -1;
6551
6552 if (priv->r_debug != 0)
2268b414 6553 {
b1fbec62
GB
6554 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6555 (unsigned char *) &r_version,
6556 sizeof (r_version)) != 0
4eb629d5 6557 || r_version < 1)
b1fbec62
GB
6558 {
6559 warning ("unexpected r_debug version %d", r_version);
6560 }
6561 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6562 &lm_addr, ptr_size) != 0)
6563 {
6564 warning ("unable to read r_map from 0x%lx",
6565 (long) priv->r_debug + lmo->r_map_offset);
6566 }
2268b414 6567 }
b1fbec62 6568 }
2268b414 6569
f6e8a41e 6570 std::string document = "<library-list-svr4 version=\"1.0\"";
b1fbec62
GB
6571
6572 while (lm_addr
6573 && read_one_ptr (lm_addr + lmo->l_name_offset,
6574 &l_name, ptr_size) == 0
6575 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6576 &l_addr, ptr_size) == 0
6577 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6578 &l_ld, ptr_size) == 0
6579 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6580 &l_prev, ptr_size) == 0
6581 && read_one_ptr (lm_addr + lmo->l_next_offset,
6582 &l_next, ptr_size) == 0)
6583 {
6584 unsigned char libname[PATH_MAX];
6585
6586 if (lm_prev != l_prev)
2268b414 6587 {
b1fbec62
GB
6588 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6589 (long) lm_prev, (long) l_prev);
6590 break;
2268b414
JK
6591 }
6592
d878444c
JK
6593 /* Ignore the first entry even if it has valid name as the first entry
6594 corresponds to the main executable. The first entry should not be
6595 skipped if the dynamic loader was loaded late by a static executable
6596 (see solib-svr4.c parameter ignore_first). But in such case the main
6597 executable does not have PT_DYNAMIC present and this function already
6598 exited above due to failed get_r_debug. */
6599 if (lm_prev == 0)
f6e8a41e 6600 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
d878444c
JK
6601 else
6602 {
6603 /* Not checking for error because reading may stop before
6604 we've got PATH_MAX worth of characters. */
6605 libname[0] = '\0';
6606 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6607 libname[sizeof (libname) - 1] = '\0';
6608 if (libname[0] != '\0')
2268b414 6609 {
d878444c
JK
6610 if (!header_done)
6611 {
6612 /* Terminate `<library-list-svr4'. */
f6e8a41e 6613 document += '>';
d878444c
JK
6614 header_done = 1;
6615 }
2268b414 6616
e6a58aa8
SM
6617 string_appendf (document, "<library name=\"");
6618 xml_escape_text_append (&document, (char *) libname);
6619 string_appendf (document, "\" lm=\"0x%lx\" "
f6e8a41e 6620 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
e6a58aa8
SM
6621 (unsigned long) lm_addr, (unsigned long) l_addr,
6622 (unsigned long) l_ld);
d878444c 6623 }
0afae3cf 6624 }
b1fbec62
GB
6625
6626 lm_prev = lm_addr;
6627 lm_addr = l_next;
2268b414
JK
6628 }
6629
b1fbec62
GB
6630 if (!header_done)
6631 {
6632 /* Empty list; terminate `<library-list-svr4'. */
f6e8a41e 6633 document += "/>";
b1fbec62
GB
6634 }
6635 else
f6e8a41e 6636 document += "</library-list-svr4>";
b1fbec62 6637
f6e8a41e 6638 int document_len = document.length ();
2268b414
JK
6639 if (offset < document_len)
6640 document_len -= offset;
6641 else
6642 document_len = 0;
6643 if (len > document_len)
6644 len = document_len;
6645
f6e8a41e 6646 memcpy (readbuf, document.data () + offset, len);
2268b414
JK
6647
6648 return len;
6649}
6650
9accd112
MM
6651#ifdef HAVE_LINUX_BTRACE
6652
79597bdd 6653btrace_target_info *
696c0d5e 6654linux_process_target::enable_btrace (thread_info *tp,
79597bdd
TBA
6655 const btrace_config *conf)
6656{
696c0d5e 6657 return linux_enable_btrace (tp->id, conf);
79597bdd
TBA
6658}
6659
969c39fb 6660/* See to_disable_btrace target method. */
9accd112 6661
79597bdd
TBA
6662int
6663linux_process_target::disable_btrace (btrace_target_info *tinfo)
969c39fb
MM
6664{
6665 enum btrace_error err;
6666
6667 err = linux_disable_btrace (tinfo);
6668 return (err == BTRACE_ERR_NONE ? 0 : -1);
6669}
6670
bc504a31 6671/* Encode an Intel Processor Trace configuration. */
b20a6524
MM
6672
6673static void
6674linux_low_encode_pt_config (struct buffer *buffer,
6675 const struct btrace_data_pt_config *config)
6676{
6677 buffer_grow_str (buffer, "<pt-config>\n");
6678
6679 switch (config->cpu.vendor)
6680 {
6681 case CV_INTEL:
6682 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6683 "model=\"%u\" stepping=\"%u\"/>\n",
6684 config->cpu.family, config->cpu.model,
6685 config->cpu.stepping);
6686 break;
6687
6688 default:
6689 break;
6690 }
6691
6692 buffer_grow_str (buffer, "</pt-config>\n");
6693}
6694
6695/* Encode a raw buffer. */
6696
6697static void
6698linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6699 unsigned int size)
6700{
6701 if (size == 0)
6702 return;
6703
268a13a5 6704 /* We use hex encoding - see gdbsupport/rsp-low.h. */
b20a6524
MM
6705 buffer_grow_str (buffer, "<raw>\n");
6706
6707 while (size-- > 0)
6708 {
6709 char elem[2];
6710
6711 elem[0] = tohex ((*data >> 4) & 0xf);
6712 elem[1] = tohex (*data++ & 0xf);
6713
6714 buffer_grow (buffer, elem, 2);
6715 }
6716
6717 buffer_grow_str (buffer, "</raw>\n");
6718}
6719
969c39fb
MM
6720/* See to_read_btrace target method. */
6721
79597bdd
TBA
6722int
6723linux_process_target::read_btrace (btrace_target_info *tinfo,
6724 buffer *buffer,
6725 enum btrace_read_type type)
9accd112 6726{
734b0e4b 6727 struct btrace_data btrace;
969c39fb 6728 enum btrace_error err;
9accd112 6729
969c39fb
MM
6730 err = linux_read_btrace (&btrace, tinfo, type);
6731 if (err != BTRACE_ERR_NONE)
6732 {
6733 if (err == BTRACE_ERR_OVERFLOW)
6734 buffer_grow_str0 (buffer, "E.Overflow.");
6735 else
6736 buffer_grow_str0 (buffer, "E.Generic Error.");
6737
8dcc53b3 6738 return -1;
969c39fb 6739 }
9accd112 6740
734b0e4b
MM
6741 switch (btrace.format)
6742 {
6743 case BTRACE_FORMAT_NONE:
6744 buffer_grow_str0 (buffer, "E.No Trace.");
8dcc53b3 6745 return -1;
734b0e4b
MM
6746
6747 case BTRACE_FORMAT_BTS:
6748 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6749 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
9accd112 6750
46f29a9a 6751 for (const btrace_block &block : *btrace.variant.bts.blocks)
734b0e4b 6752 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
46f29a9a 6753 paddress (block.begin), paddress (block.end));
9accd112 6754
734b0e4b
MM
6755 buffer_grow_str0 (buffer, "</btrace>\n");
6756 break;
6757
b20a6524
MM
6758 case BTRACE_FORMAT_PT:
6759 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6760 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6761 buffer_grow_str (buffer, "<pt>\n");
6762
6763 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
9accd112 6764
b20a6524
MM
6765 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6766 btrace.variant.pt.size);
6767
6768 buffer_grow_str (buffer, "</pt>\n");
6769 buffer_grow_str0 (buffer, "</btrace>\n");
6770 break;
6771
6772 default:
6773 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
8dcc53b3 6774 return -1;
734b0e4b 6775 }
969c39fb
MM
6776
6777 return 0;
9accd112 6778}
f4abbc16
MM
6779
6780/* See to_btrace_conf target method. */
6781
79597bdd
TBA
6782int
6783linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
6784 buffer *buffer)
f4abbc16
MM
6785{
6786 const struct btrace_config *conf;
6787
6788 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6789 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6790
6791 conf = linux_btrace_conf (tinfo);
6792 if (conf != NULL)
6793 {
6794 switch (conf->format)
6795 {
6796 case BTRACE_FORMAT_NONE:
6797 break;
6798
6799 case BTRACE_FORMAT_BTS:
d33501a5
MM
6800 buffer_xml_printf (buffer, "<bts");
6801 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6802 buffer_xml_printf (buffer, " />\n");
f4abbc16 6803 break;
b20a6524
MM
6804
6805 case BTRACE_FORMAT_PT:
6806 buffer_xml_printf (buffer, "<pt");
6807 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6808 buffer_xml_printf (buffer, "/>\n");
6809 break;
f4abbc16
MM
6810 }
6811 }
6812
6813 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6814 return 0;
6815}
9accd112
MM
6816#endif /* HAVE_LINUX_BTRACE */
6817
7b669087
GB
6818/* See nat/linux-nat.h. */
6819
6820ptid_t
6821current_lwp_ptid (void)
6822{
6823 return ptid_of (current_thread);
6824}
6825
7f63b89b
TBA
6826const char *
6827linux_process_target::thread_name (ptid_t thread)
6828{
6829 return linux_proc_tid_get_name (thread);
6830}
6831
6832#if USE_THREAD_DB
6833bool
6834linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
6835 int *handle_len)
6836{
6837 return thread_db_thread_handle (ptid, handle, handle_len);
6838}
6839#endif
6840
7b961964
SM
6841thread_info *
6842linux_process_target::thread_pending_parent (thread_info *thread)
6843{
6844 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
6845
6846 if (parent == nullptr)
6847 return nullptr;
6848
6849 return get_lwp_thread (parent);
6850}
6851
df5ad102
SM
6852thread_info *
6853linux_process_target::thread_pending_child (thread_info *thread)
6854{
6855 lwp_info *child = get_thread_lwp (thread)->pending_child ();
6856
6857 if (child == nullptr)
6858 return nullptr;
6859
6860 return get_lwp_thread (child);
6861}
6862
276d4552
YQ
6863/* Default implementation of linux_target_ops method "set_pc" for
6864 32-bit pc register which is literally named "pc". */
6865
6866void
6867linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
6868{
6869 uint32_t newpc = pc;
6870
6871 supply_register_by_name (regcache, "pc", &newpc);
6872}
6873
6874/* Default implementation of linux_target_ops method "get_pc" for
6875 32-bit pc register which is literally named "pc". */
6876
6877CORE_ADDR
6878linux_get_pc_32bit (struct regcache *regcache)
6879{
6880 uint32_t pc;
6881
6882 collect_register_by_name (regcache, "pc", &pc);
c058728c 6883 threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
276d4552
YQ
6884 return pc;
6885}
6886
6f69e520
YQ
6887/* Default implementation of linux_target_ops method "set_pc" for
6888 64-bit pc register which is literally named "pc". */
6889
6890void
6891linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
6892{
6893 uint64_t newpc = pc;
6894
6895 supply_register_by_name (regcache, "pc", &newpc);
6896}
6897
6898/* Default implementation of linux_target_ops method "get_pc" for
6899 64-bit pc register which is literally named "pc". */
6900
6901CORE_ADDR
6902linux_get_pc_64bit (struct regcache *regcache)
6903{
6904 uint64_t pc;
6905
6906 collect_register_by_name (regcache, "pc", &pc);
c058728c 6907 threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
6f69e520
YQ
6908 return pc;
6909}
6910
0570503d 6911/* See linux-low.h. */
974c89e0 6912
0570503d
PFC
6913int
6914linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
974c89e0
AH
6915{
6916 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
6917 int offset = 0;
6918
6919 gdb_assert (wordsize == 4 || wordsize == 8);
6920
52405d85 6921 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
974c89e0
AH
6922 {
6923 if (wordsize == 4)
6924 {
0570503d 6925 uint32_t *data_p = (uint32_t *) data;
974c89e0 6926 if (data_p[0] == match)
0570503d
PFC
6927 {
6928 *valp = data_p[1];
6929 return 1;
6930 }
974c89e0
AH
6931 }
6932 else
6933 {
0570503d 6934 uint64_t *data_p = (uint64_t *) data;
974c89e0 6935 if (data_p[0] == match)
0570503d
PFC
6936 {
6937 *valp = data_p[1];
6938 return 1;
6939 }
974c89e0
AH
6940 }
6941
6942 offset += 2 * wordsize;
6943 }
6944
6945 return 0;
6946}
6947
6948/* See linux-low.h. */
6949
6950CORE_ADDR
6951linux_get_hwcap (int wordsize)
6952{
0570503d
PFC
6953 CORE_ADDR hwcap = 0;
6954 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
6955 return hwcap;
974c89e0
AH
6956}
6957
6958/* See linux-low.h. */
6959
6960CORE_ADDR
6961linux_get_hwcap2 (int wordsize)
6962{
0570503d
PFC
6963 CORE_ADDR hwcap2 = 0;
6964 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
6965 return hwcap2;
974c89e0 6966}
6f69e520 6967
3aee8918
PA
6968#ifdef HAVE_LINUX_REGSETS
6969void
6970initialize_regsets_info (struct regsets_info *info)
6971{
6972 for (info->num_regsets = 0;
6973 info->regsets[info->num_regsets].size >= 0;
6974 info->num_regsets++)
6975 ;
3aee8918
PA
6976}
6977#endif
6978
da6d8c04
DJ
6979void
6980initialize_low (void)
6981{
bd99dc85 6982 struct sigaction sigchld_action;
dd373349 6983
bd99dc85 6984 memset (&sigchld_action, 0, sizeof (sigchld_action));
ef0478f6 6985 set_target_ops (the_linux_target);
dd373349 6986
aa7c7447 6987 linux_ptrace_init_warnings ();
1b919490 6988 linux_proc_init_warnings ();
bd99dc85
PA
6989
6990 sigchld_action.sa_handler = sigchld_handler;
6991 sigemptyset (&sigchld_action.sa_mask);
6992 sigchld_action.sa_flags = SA_RESTART;
6993 sigaction (SIGCHLD, &sigchld_action, NULL);
3aee8918
PA
6994
6995 initialize_low_arch ();
89245bc0
DB
6996
6997 linux_check_ptrace_features ();
da6d8c04 6998}