]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdbserver/linux-low.cc
Use section name in DWARF error message
[thirdparty/binutils-gdb.git] / gdbserver / linux-low.cc
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
1d506c26 2 Copyright (C) 1995-2024 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04 18
58caa3dc 19#include "linux-low.h"
125f8a3d 20#include "nat/linux-osdata.h"
268a13a5 21#include "gdbsupport/agent.h"
de0d863e 22#include "tdesc.h"
cdc8e9b2
JB
23#include "gdbsupport/event-loop.h"
24#include "gdbsupport/event-pipe.h"
268a13a5
TT
25#include "gdbsupport/rsp-low.h"
26#include "gdbsupport/signals-state-save-restore.h"
96d7229d
LM
27#include "nat/linux-nat.h"
28#include "nat/linux-waitpid.h"
268a13a5 29#include "gdbsupport/gdb_wait.h"
5826e159 30#include "nat/gdb_ptrace.h"
125f8a3d
GB
31#include "nat/linux-ptrace.h"
32#include "nat/linux-procfs.h"
8cc73a39 33#include "nat/linux-personality.h"
da6d8c04
DJ
34#include <signal.h>
35#include <sys/ioctl.h>
36#include <fcntl.h>
0a30fbc4 37#include <unistd.h>
fd500816 38#include <sys/syscall.h>
f9387fc3 39#include <sched.h>
07e059b5
VP
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
53ce3c39 43#include <sys/stat.h>
efcbbd14 44#include <sys/vfs.h>
1570b33e 45#include <sys/uio.h>
07b3255c
TT
46#include <langinfo.h>
47#include <iconv.h>
268a13a5 48#include "gdbsupport/filestuff.h"
07b3255c 49#include "gdbsupport/gdb-safe-ctype.h"
c144c7a0 50#include "tracepoint.h"
276d4552 51#include <inttypes.h>
268a13a5 52#include "gdbsupport/common-inferior.h"
2090129c 53#include "nat/fork-inferior.h"
268a13a5 54#include "gdbsupport/environ.h"
21987b9c 55#include "gdbsupport/gdb-sigmask.h"
268a13a5 56#include "gdbsupport/scoped_restore.h"
957f3f49
DE
57#ifndef ELFMAG0
58/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
59 then ELFMAG0 will have been defined. If it didn't get included by
60 gdb_proc_service.h then including it will likely introduce a duplicate
61 definition of elf_fpregset_t. */
62#include <elf.h>
63#endif
14d2069a 64#include "nat/linux-namespaces.h"
efcbbd14 65
fd462a61
DJ
66#ifndef O_LARGEFILE
67#define O_LARGEFILE 0
68#endif
1a981360 69
69f4c9cc
AH
70#ifndef AT_HWCAP2
71#define AT_HWCAP2 26
72#endif
73
db0dfaa0
LM
74/* Some targets did not define these ptrace constants from the start,
75 so gdbserver defines them locally here. In the future, these may
76 be removed after they are added to asm/ptrace.h. */
77#if !(defined(PT_TEXT_ADDR) \
78 || defined(PT_DATA_ADDR) \
79 || defined(PT_TEXT_END_ADDR))
80#if defined(__mcoldfire__)
81/* These are still undefined in 3.10 kernels. */
82#define PT_TEXT_ADDR 49*4
83#define PT_DATA_ADDR 50*4
84#define PT_TEXT_END_ADDR 51*4
db0dfaa0
LM
85/* These are still undefined in 3.10 kernels. */
86#elif defined(__TMS320C6X__)
87#define PT_TEXT_ADDR (0x10000*4)
88#define PT_DATA_ADDR (0x10004*4)
89#define PT_TEXT_END_ADDR (0x10008*4)
90#endif
91#endif
92
5203ae1e
TBA
93#if (defined(__UCLIBC__) \
94 && defined(HAS_NOMMU) \
95 && defined(PT_TEXT_ADDR) \
96 && defined(PT_DATA_ADDR) \
97 && defined(PT_TEXT_END_ADDR))
98#define SUPPORTS_READ_OFFSETS
99#endif
100
9accd112 101#ifdef HAVE_LINUX_BTRACE
125f8a3d 102# include "nat/linux-btrace.h"
268a13a5 103# include "gdbsupport/btrace-common.h"
9accd112
MM
104#endif
105
8365dcf5
TJB
106#ifndef HAVE_ELF32_AUXV_T
107/* Copied from glibc's elf.h. */
108typedef struct
109{
110 uint32_t a_type; /* Entry type */
111 union
112 {
113 uint32_t a_val; /* Integer value */
114 /* We use to have pointer elements added here. We cannot do that,
115 though, since it does not work when using 32-bit definitions
116 on 64-bit platforms and vice versa. */
117 } a_un;
118} Elf32_auxv_t;
119#endif
120
121#ifndef HAVE_ELF64_AUXV_T
122/* Copied from glibc's elf.h. */
123typedef struct
124{
125 uint64_t a_type; /* Entry type */
126 union
127 {
128 uint64_t a_val; /* Integer value */
129 /* We use to have pointer elements added here. We cannot do that,
130 though, since it does not work when using 32-bit definitions
131 on 64-bit platforms and vice versa. */
132 } a_un;
133} Elf64_auxv_t;
134#endif
135
ded48a5e 136/* Does the current host support PTRACE_GETREGSET? */
56f703d3 137int have_ptrace_getregset = -1;
ded48a5e 138
8a841a35
PA
139/* Return TRUE if THREAD is the leader thread of the process. */
140
141static bool
142is_leader (thread_info *thread)
143{
144 ptid_t ptid = ptid_of (thread);
145 return ptid.pid () == ptid.lwp ();
146}
147
48989498
PA
148/* Return true if we should report thread exit events to GDB, for
149 THR. */
150
151static bool
152report_exit_events_for (thread_info *thr)
153{
154 client_state &cs = get_client_state ();
155
156 return (cs.report_thread_events
157 || (thr->thread_options & GDB_THREAD_OPTION_EXIT) != 0);
158}
159
cff068da
GB
160/* LWP accessors. */
161
/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  /* An LWP's ptid is the ptid of the thread object that wraps it.  */
  return ptid_of (get_lwp_thread (lwp));
}
169
/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  /* Ownership of INFO transfers to LWP; it is released through
     low_delete_thread when the LWP is deleted (see delete_lwp).  */
  lwp->arch_private = info;
}
178
/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  /* Accessor for the architecture-specific per-LWP data.  */
  return lwp->arch_private;
}
186
/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  /* Non-zero when the LWP is known to be ptrace-stopped.  */
  return lwp->stopped;
}
194
/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  /* Why the LWP last stopped (breakpoint, watchpoint, etc.).  */
  return lwp->stop_reason;
}
202
0e00e962
AA
/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  /* Non-zero while the LWP is being single-stepped.  */
  return lwp->stepping;
}
210
05044653
PA
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the list.  Entries are heap-allocated by add_to_pid_list
   and freed by pull_pid_from_list.  */
static struct simple_pid_list *stopped_pids;
05044653
PA
227
228/* Trivial list manipulation functions to keep track of a list of new
229 stopped processes. */
230
231static void
232add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
233{
8d749320 234 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
235
236 new_pid->pid = pid;
237 new_pid->status = status;
238 new_pid->next = *listp;
239 *listp = new_pid;
240}
241
242static int
243pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
244{
245 struct simple_pid_list **p;
246
247 for (p = listp; *p != NULL; p = &(*p)->next)
248 if ((*p)->pid == pid)
249 {
250 struct simple_pid_list *next = (*p)->next;
251
252 *statusp = (*p)->status;
253 xfree (*p);
254 *p = next;
255 return 1;
256 }
257 return 0;
258}
24a09b5f 259
bde24c0a
PA
/* The different modes stop_all_lwps can run in.  */

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
274
275/* FIXME make into a target method? */
24a09b5f 276int using_threads = 1;
24a09b5f 277
fa593d66
PA
278/* True if we're presently stabilizing threads (moving them out of
279 jump pads). */
280static int stabilizing_threads;
281
f50bf8e5 282static void unsuspend_all_lwps (struct lwp_info *except);
e8a625d1
PA
283static void mark_lwp_dead (struct lwp_info *lwp, int wstat,
284 bool thread_event);
00db26fa 285static int lwp_is_marked_dead (struct lwp_info *lwp);
d50171e4 286static int kill_lwp (unsigned long lwpid, int signo);
863d01bd 287static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
ece66d65 288static int linux_low_ptrace_options (int attached);
ced2dffb 289static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
d50171e4 290
582511be
PA
291/* When the event-loop is doing a step-over, this points at the thread
292 being stepped. */
6bd434d6 293static ptid_t step_over_bkpt;
582511be 294
bf9ae9d8
TBA
/* Default implementation: the target does not support breakpoints
   (and therefore PC get/set via low_get_pc/low_set_pc).  Overridden
   by low targets that do.  */

bool
linux_process_target::low_supports_breakpoints ()
{
  return false;
}
d50171e4 300
bf9ae9d8
TBA
/* Default implementation; only meaningful on targets that override
   low_supports_breakpoints to return true.  */

CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  return 0;
}
306
/* Default implementation; must be overridden by any target for which
   low_supports_breakpoints returns true.  */

void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}
0d62e5e8 312
7582c77c
TBA
/* Default implementation; presumably only reached on targets that
   support software single-step, which must override it.  */

std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
			  "implemented");
}
319
d4807ea2
TBA
/* Default: the PC is not adjusted after a software breakpoint trap.
   Targets where the trap leaves the PC past the breakpoint
   instruction override this with the breakpoint length.  */

int
linux_process_target::low_decr_pc_after_break ()
{
  return 0;
}
325
c2d6af84
PA
326/* True if LWP is stopped in its stepping range. */
327
328static int
329lwp_in_step_range (struct lwp_info *lwp)
330{
331 CORE_ADDR pc = lwp->stop_pc;
332
333 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
334}
335
cdc8e9b2
JB
336/* The event pipe registered as a waitable file in the event loop. */
337static event_pipe linux_event_pipe;
bd99dc85
PA
338
339/* True if we're currently in async mode. */
cdc8e9b2 340#define target_is_async_p() (linux_event_pipe.is_open ())
bd99dc85 341
02fc4de7 342static void send_sigstop (struct lwp_info *lwp);
bd99dc85 343
d0722149
DE
/* Examine HEADER and store its machine type in *MACHINE.  Return 1 if
   HEADER describes a 64-bit ELF file, 0 if it describes an ELF file
   of another class.  If HEADER is not an ELF header at all, store
   EM_NONE in *MACHINE and return -1.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  /* Check the "\177ELF" magic bytes first.  */
  if (memcmp (header->e_ident, ELFMAG, SELFMAG) != 0)
    {
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return header->e_ident[EI_CLASS] == ELFCLASS64;
}
361
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;

  int fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  ssize_t nread = read (fd, &header, sizeof (header));
  close (fd);

  /* A file too short to hold a full ELF header cannot be a 64-bit
     ELF file.  */
  if (nread != (ssize_t) sizeof (header))
    return 0;

  return elf_64_header_p (&header, machine);
}
385
be07f1a2
PA
386/* Accepts an integer PID; Returns true if the executable PID is
387 running is a 64-bit ELF file.. */
388
389int
214d508e 390linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 391{
d8d2a3ee 392 char file[PATH_MAX];
be07f1a2
PA
393
394 sprintf (file, "/proc/%d/exe", pid);
214d508e 395 return elf_64_file_p (file, machine);
be07f1a2
PA
396}
397
fd000fb3
TBA
/* Delete LWP: remove its wrapping thread from the thread list, free
   its architecture-specific data, and free the lwp_info itself.  */

void
linux_process_target::delete_lwp (lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  threads_debug_printf ("deleting %ld", lwpid_of (thr));

  /* Remove the thread first, while LWP is still valid.  */
  remove_thread (thr);

  /* Release any per-arch data attached by the low target.  */
  low_delete_thread (lwp->arch_private);

  delete lwp;
}
411
fd000fb3
TBA
/* Free the architecture-specific per-LWP data INFO.  */

void
linux_process_target::low_delete_thread (arch_lwp_info *info)
{
  /* Default implementation should be overridden if architecture-specific
     info is being used.  */
  gdb_assert (info == nullptr);
}
95954743 419
421490af
PA
/* Open the /proc/PID/mem file for PROC and record the descriptor in
   PROC->priv->mem_fd.  PROC must not already have one open.  */

static void
open_proc_mem_file (process_info *proc)
{
  gdb_assert (proc->priv->mem_fd == -1);

  char filename[64];
  xsnprintf (filename, sizeof filename, "/proc/%d/mem", proc->pid);

  /* O_LARGEFILE so large offsets work on 32-bit hosts; close-on-exec
     so the descriptor does not leak into spawned inferiors.  */
  proc->priv->mem_fd
    = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
}
433
fd000fb3 434process_info *
421490af 435linux_process_target::add_linux_process_no_mem_file (int pid, int attached)
95954743
PA
436{
437 struct process_info *proc;
438
95954743 439 proc = add_process (pid, attached);
8d749320 440 proc->priv = XCNEW (struct process_info_private);
95954743 441
fd000fb3 442 proc->priv->arch_private = low_new_process ();
421490af
PA
443 proc->priv->mem_fd = -1;
444
445 return proc;
446}
447
aa5ca48f 448
421490af
PA
/* Register process PID and open its /proc/PID/mem file right away.
   Suitable when PID's address space is already stable (i.e., not
   while nursing a freshly spawned inferior through its execs).  */

process_info *
linux_process_target::add_linux_process (int pid, int attached)
{
  process_info *proc = add_linux_process_no_mem_file (pid, attached);
  open_proc_mem_file (proc);
  return proc;
}
456
f551c8ef
SM
/* Tear down PROC: close its mem file if open, free its private and
   arch-private data, and remove it from the process list.  */

void
linux_process_target::remove_linux_process (process_info *proc)
{
  if (proc->priv->mem_fd >= 0)
    close (proc->priv->mem_fd);

  this->low_delete_process (proc->priv->arch_private);

  xfree (proc->priv);
  proc->priv = nullptr;

  remove_process (proc);
}
470
fd000fb3
TBA
/* Default implementation: no architecture-specific per-process
   data.  */

arch_process_info *
linux_process_target::low_new_process ()
{
  return nullptr;
}
476
/* Free the architecture-specific per-process data INFO.  */

void
linux_process_target::low_delete_process (arch_process_info *info)
{
  /* Default implementation must be overridden if architecture-specific
     info exists.  */
  gdb_assert (info == nullptr);
}
484
/* Hook called when CHILD is forked off PARENT, so the low target can
   clone arch-specific process data.  Default: nothing to do.  */

void
linux_process_target::low_new_fork (process_info *parent, process_info *child)
{
  /* Nop.  */
}
490
797bcff5
TBA
/* Run the low target's architecture setup with THREAD as the current
   thread.  */

void
linux_process_target::arch_setup_thread (thread_info *thread)
{
  /* low_arch_setup operates on the current thread; switch to THREAD
     temporarily — the previous current thread is restored on
     return.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  low_arch_setup ();
}
499
d16f3f6c
TBA
/* Handle a ptrace extended event (fork/vfork/clone/vfork-done/exec)
   reported for *ORIG_EVENT_LWP with wait status WSTAT.  Return 0 if
   the event should be reported to GDB — in which case *ORIG_EVENT_LWP
   may have been replaced (exec creates a fresh LWP) — or 1 if the
   event was handled internally and should be suppressed (a clone the
   client did not ask to hear about).  */

int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
					    int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);

  gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (debug_threads)
	{
	  debug_printf ("HEW: Got %s event from LWP %ld, new child is %ld\n",
			(event == PTRACE_EVENT_FORK ? "fork"
			 : event == PTRACE_EVENT_VFORK ? "vfork"
			 : event == PTRACE_EVENT_CLONE ? "clone"
			 : "???"),
			ptid_of (event_thr).lwp (),
			new_pid);
	}

      /* A fork/vfork child is a new process; a clone is a new thread
	 of the existing process.  */
      ptid_t child_ptid = (event != PTRACE_EVENT_CLONE
			   ? ptid_t (new_pid, new_pid)
			   : ptid_t (ptid_of (event_thr).pid (), new_pid));

      process_info *child_proc = nullptr;

      if (event != PTRACE_EVENT_CLONE)
	{
	  /* Add the new process to the tables before we add the LWP.
	     We need to do this even if the new process will be
	     detached.  See breakpoint cloning code further below.  */
	  child_proc = add_linux_process (new_pid, 0);
	}

      lwp_info *child_lwp = add_lwp (child_ptid);
      gdb_assert (child_lwp != NULL);
      child_lwp->stopped = 1;
      if (event != PTRACE_EVENT_CLONE)
	child_lwp->must_set_ptrace_flags = 1;
      child_lwp->status_pending_p = 0;

      thread_info *child_thr = get_lwp_thread (child_lwp);

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	{
	  threads_debug_printf ("leaving child suspended");
	  child_lwp->suspended = 1;
	}

      if (event_lwp->bp_reinsert != 0
	  && supports_software_single_step ()
	  && event == PTRACE_EVENT_VFORK)
	{
	  /* If we leave single-step breakpoints there, child will
	     hit it, so uninsert single-step breakpoints from parent
	     (and child).  Once vfork child is done, reinsert
	     them back to parent.  */
	  uninsert_single_step_breakpoints (event_thr);
	}

      if (event != PTRACE_EVENT_CLONE)
	{
	  /* Clone the breakpoint lists of the parent.  We need to do
	     this even if the new process will be detached, since we
	     will need the process object and the breakpoints to
	     remove any breakpoints from memory when we detach, and
	     the client side will access registers.  */
	  gdb_assert (child_proc != NULL);

	  process_info *parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  clone_all_breakpoints (child_thr, event_thr);

	  target_desc_up tdesc = allocate_target_description ();
	  copy_target_description (tdesc.get (), parent_proc->tdesc);
	  child_proc->tdesc = tdesc.release ();

	  /* Clone arch-specific process data.  */
	  low_new_fork (parent_proc, child_proc);
	}

      /* Save fork/clone info in the parent thread.  */
      if (event == PTRACE_EVENT_FORK)
	event_lwp->waitstatus.set_forked (child_ptid);
      else if (event == PTRACE_EVENT_VFORK)
	event_lwp->waitstatus.set_vforked (child_ptid);
      else if (event == PTRACE_EVENT_CLONE
	       && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
	event_lwp->waitstatus.set_thread_cloned (child_ptid);

      if (event != PTRACE_EVENT_CLONE
	  || (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
	{
	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled, the
	     handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent's event is passed on to
	     GDB.  */
	  event_lwp->relative = child_lwp;
	  child_lwp->relative = event_lwp;
	}

      /* If the parent thread is doing step-over with single-step
	 breakpoints, the list of single-step breakpoints are cloned
	 from the parent's.  Remove them from the child process.
	 In case of vfork, we'll reinsert them back once vforked
	 child is done.  */
      if (event_lwp->bp_reinsert != 0
	  && supports_software_single_step ())
	{
	  /* The child process is forked and stopped, so it is safe
	     to access its memory without stopping all other threads
	     from other processes.  */
	  delete_single_step_breakpoints (child_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	  gdb_assert (!has_single_step_breakpoints (child_thr));
	}

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  child_lwp->stop_expected = 1;
	  child_lwp->status_pending_p = 1;
	  child_lwp->status_pending = status;
	}
      else if (event == PTRACE_EVENT_CLONE && cs.report_thread_events)
	{
	  child_lwp->waitstatus.set_thread_created ();
	  child_lwp->status_pending_p = 1;
	  child_lwp->status_pending = status;
	}

      if (event == PTRACE_EVENT_CLONE)
	{
#ifdef USE_THREAD_DB
	  thread_db_notice_clone (event_thr, child_ptid);
#endif
	}

      if (event == PTRACE_EVENT_CLONE
	  && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) == 0)
	{
	  /* The client did not ask for clone events; swallow it.  */
	  threads_debug_printf
	    ("not reporting clone event from LWP %ld, new child is %ld\n",
	     ptid_of (event_thr).lwp (),
	     new_pid);
	  return 1;
	}

      /* Leave the child stopped until GDB processes the parent
	 event.  */
      child_thr->last_resume_kind = resume_stop;
      child_thr->last_status.set_stopped (GDB_SIGNAL_0);

      /* Report the event.  */
      threads_debug_printf
	("reporting %s event from LWP %ld, new child is %ld\n",
	 (event == PTRACE_EVENT_FORK ? "fork"
	  : event == PTRACE_EVENT_VFORK ? "vfork"
	  : event == PTRACE_EVENT_CLONE ? "clone"
	  : "???"),
	 ptid_of (event_thr).lwp (),
	 new_pid);
      return 0;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.set_vfork_done ();

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
	{
	  /* Restore the single-step breakpoints removed when the
	     vfork child was spawned (see the VFORK case above).  */
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      threads_debug_printf ("Got exec event from LWP %ld",
			    lwpid_of (event_thr));

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      switch_to_thread (nullptr);

      /* Create a new process/lwp/thread.  */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.set_execd
	(make_unique_xstrdup
	 (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.set_ignore ();

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (_("unknown ptrace event %d"), event);
}
780
df95181f
TBA
/* Return LWP's current PC, or 0 if the target does not support
   breakpoints (and hence PC reads).  Must not be called while the
   process is still starting up.  */

CORE_ADDR
linux_process_target::get_pc (lwp_info *lwp)
{
  process_info *proc = get_thread_process (get_lwp_thread (lwp));
  /* Registers cannot be fetched while the process is being nursed
     through its startup execs.  */
  gdb_assert (!proc->starting_up);

  if (!low_supports_breakpoints ())
    return 0;

  /* Register reads act on the current thread; switch temporarily.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  struct regcache *regcache = get_thread_regcache (current_thread, 1);
  CORE_ADDR pc = low_get_pc (regcache);

  threads_debug_printf ("pc is 0x%lx", (long) pc);

  return pc;
}
800
9eedd27d
TBA
/* Store in *SYSNO the number of the syscall LWP is stopped at, as
   reported by the low target (UNKNOWN_SYSCALL if the low target does
   not implement low_get_syscall_trapinfo).  */

void
linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
{
  struct regcache *regcache;

  /* The regcache lookup acts on the current thread; switch
     temporarily.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  regcache = get_thread_regcache (current_thread, 1);
  low_get_syscall_trapinfo (regcache, sysno);

  threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
}
814
9eedd27d
TBA
void
linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* By default, report an unknown system call number.  */
  *sysno = UNKNOWN_SYSCALL;
}
821
df95181f
TBA
/* Work out why LWP stopped and record it in lwp->stop_reason and
   lwp->stop_pc (backing the PC up over a just-hit software breakpoint
   if needed).  Return false if the target does not support
   breakpoints; true otherwise — including while the process is
   starting up, when we merely claim to have the stop PC so the
   caller does not try to fetch it.  */

bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  process_info *proc = get_thread_process (get_lwp_thread (lwp));
  if (proc->starting_up)
    {
      /* Claim we have the stop PC so that the caller doesn't try to
	 fetch it itself.  */
      return true;
    }

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      threads_debug_printf
	("%s stopped by software breakpoint",
	 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    threads_debug_printf
      ("%s stopped by hardware breakpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    threads_debug_printf
      ("%s stopped by hardware watchpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    threads_debug_printf
      ("%s stopped by trace",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

  lwp->stop_pc = pc;
  return true;
}
ce3a066d 940
fd000fb3
TBA
/* Create an lwp_info for PTID, add its wrapping thread to the thread
   list, and let the low target attach per-LWP data.  */

lwp_info *
linux_process_target::add_lwp (ptid_t ptid)
{
  lwp_info *lwp = new lwp_info;

  /* Link the LWP and its thread both ways.  */
  lwp->thread = add_thread (ptid, lwp);

  low_new_thread (lwp);

  return lwp;
}
611cb4a5 952
fd000fb3
TBA
/* Hook called for every new LWP, so the low target can initialize
   per-LWP data.  Default: nothing to do.  */

void
linux_process_target::low_new_thread (lwp_info *info)
{
  /* Nop.  */
}
958
2090129c
SDJ
/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  /* Running in the fork child: ask to be traced by our parent
     (gdbserver) before the upcoming exec.  */
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  /* Put the inferior in its own process group.  */
  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
990
/* Start an inferior process and returns its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    /* Scoped: personality is restored when this block exits.  */
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  /* When spawning a new process, we can't open the mem file yet.  We
     still have to nurse the process through the shell, and that execs
     a couple times.  The address space a /proc/PID/mem file is
     accessing is destroyed on exec.  */
  process_info *proc = add_linux_process_no_mem_file (pid, 0);

  ptid = ptid_t (pid, pid);
  new_lwp = add_lwp (ptid);
  /* Ptrace options will be set once the first stop is seen; see
     post_create_inferior.  */
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  /* PROC is now past the shell running the program we want, so we can
     open the /proc/PID/mem file.  */
  open_proc_mem_file (proc);

  return pid;
}
1033
/* Implement the post_create_inferior target_ops method.  Called once
   the new inferior has reached its first stop: determine the
   architecture and enable the ptrace event options we want.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  /* Now that the process is stopped, figure out the architecture
     (registers, tdesc, etc.).  */
  low_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}
1052
/* Attach to the LWP specified by PTID.  Returns 0 on success, or an
   errno value from PTRACE_ATTACH on failure.  On success a new
   lwp_info/thread is registered and left expecting a SIGSTOP.  */

int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      threads_debug_printf ("Attached to a stopped process");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
1133
/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      threads_debug_printf ("Found new lwp %d", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	threads_debug_printf
	  ("Cannot attach to lwp %d: thread is gone (%d: %s)",
	   lwpid, err, safe_strerror (err));
      else if (err != 0)
	{
	  /* Any other failure is fatal for the attach as a whole.  */
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  error (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      return 1;
    }
  return 0;
}
1172
500c1d85
PA
1173static void async_file_mark (void);
1174
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  Returns 0 on success; throws on failure, after
   undoing the partial attach.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid);
  int err;

  /* Delay opening the /proc/PID/mem file until we've successfully
     attached.  */
  proc = add_linux_process_no_mem_file (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      /* Undo the process registration before reporting the error.  */
      this->remove_linux_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  open_proc_mem_file (proc);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid));
  gdb_assert (initial_thread != nullptr);
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  try
    {
      linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
    }
  catch (const gdb_exception_error &)
    {
      /* Make sure we do not deliver the SIGSTOP to the process.  */
      initial_thread->last_resume_kind = resume_continue;

      this->detach (proc);
      throw;
    }

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));
      gdb_assert (lwp != nullptr);

      /* Anything other than the expected SIGSTOP must be preserved
	 and reported to GDB later.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
1264
95954743 1265static int
e4eb0dec 1266last_thread_of_process_p (int pid)
95954743 1267{
e4eb0dec 1268 bool seen_one = false;
95954743 1269
da4ae14a 1270 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
95954743 1271 {
e4eb0dec
SM
1272 if (!seen_one)
1273 {
1274 /* This is the first thread of this process we see. */
1275 seen_one = true;
1276 return false;
1277 }
1278 else
1279 {
1280 /* This is the second thread of this process we see. */
1281 return true;
1282 }
1283 });
da6d8c04 1284
e4eb0dec 1285 return thread == NULL;
95954743
PA
1286}
1287
/* Kill LWP.  Sends both SIGKILL and PTRACE_KILL; see comment below
   for why both are used.  Does not wait for the lwp to die.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      /* Save errno before the debug call can clobber it.  */
      int save_errno = errno;

      threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }
}
1331
/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1373
/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  /* The leader has lwpid == pid; skip it, the caller kills it last.  */
  if (lwpid_of (thread) == pid)
    {
      threads_debug_printf ("is last of process %s",
			    target_pid_to_str (thread->id).c_str ());
      return;
    }

  kill_wait_lwp (lwp);
}
1396
/* Implement the kill target_ops method: kill every lwp of PROCESS
   (non-leaders first, then the leader) and mourn it.  Returns 0.  */

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    threads_debug_printf ("cannot find lwp for pid: %d", pid);
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1427
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   0 when there is no signal that should be delivered.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
	  || thread->last_status.sig () == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      threads_debug_printf ("lwp %s had stopped with extended "
			    "status: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      /* GDB explicitly told us not to pass this signal.  */
      threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      threads_debug_printf ("lwp %s had signal %s, "
			    "but we don't know if we should pass it. "
			    "Default to not.",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      threads_debug_printf ("lwp %s has pending signal %s: delivering it",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
1503
/* Detach from a single LWP: cancel any pending SIGSTOP, compute the
   signal to deliver on detach, flush registers, and PTRACE_DETACH.
   Handles the lwp having become a zombie meanwhile (reaps it).  The
   lwp_info is deleted on exit.  */

void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      threads_debug_printf ("Sending SIGCONT to %s",
			    target_pid_to_str (ptid_of (thread)).c_str ());

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)).c_str (),
		 safe_strerror (save_errno));
	}
    }
  else
    threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
			  target_pid_to_str (ptid_of (thread)).c_str (),
			  strsignal (sig));

  delete_lwp (lwp);
}
1582
/* Implement the detach target_ops method: detach from every lwp of
   PROCESS (clone lwps first, leader last) and mourn it.  Returns 0.  */

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* If there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  gdb_assert (main_lwp != nullptr);
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1632
/* Implement the mourn target_ops method: remove all LWPs that belong
   to PROCESS from the lwp list and drop the process itself.  */

void
linux_process_target::mourn (process_info *process)
{
#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  this->remove_linux_process (process);
}
1649
95a49a39
TBA
1650void
1651linux_process_target::join (int pid)
444d6139 1652{
444d6139
PA
1653 int status, ret;
1654
1655 do {
d105de22 1656 ret = my_waitpid (pid, &status, 0);
444d6139
PA
1657 if (WIFEXITED (status) || WIFSIGNALED (status))
1658 break;
1659 } while (ret != -1 || errno != ECHILD);
1660}
1661
13d3d99b
TBA
1662/* Return true if the given thread is still alive. */
1663
1664bool
1665linux_process_target::thread_alive (ptid_t ptid)
0d62e5e8 1666{
95954743
PA
1667 struct lwp_info *lwp = find_lwp_pid (ptid);
1668
1669 /* We assume we always know if a thread exits. If a whole process
1670 exited but we still haven't been able to report it to GDB, we'll
1671 hold on to the last lwp of the dead process. */
1672 if (lwp != NULL)
00db26fa 1673 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1674 else
1675 return 0;
1676}
1677
/* Check whether THREAD's pending status is still valid.  A pending
   breakpoint stop can become stale if the PC moved or the breakpoint
   was removed since the stop was recorded; in that case the pending
   status is discarded and false is returned.  */

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      scoped_restore_current_thread restore_thread;
      switch_to_thread (thread);

      if (pc != lp->stop_pc)
	{
	  threads_debug_printf ("PC of %ld changed",
				lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      /* Without siginfo-based stop reasons, also verify the
	 breakpoint that caused the stop is still inserted.  */
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !low_breakpoint_at (pc))
	{
	  threads_debug_printf ("previous SW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  threads_debug_printf ("previous HW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
#endif

      if (discard)
	{
	  threads_debug_printf ("discarding pending breakpoint status");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1734
a681f9c9
PA
1735/* Returns true if LWP is resumed from the client's perspective. */
1736
1737static int
1738lwp_resumed (struct lwp_info *lwp)
1739{
1740 struct thread_info *thread = get_lwp_thread (lwp);
1741
1742 if (thread->last_resume_kind != resume_stop)
1743 return 1;
1744
1745 /* Did gdb send us a `vCont;t', but we haven't reported the
1746 corresponding stop to gdb yet? If so, the thread is still
1747 resumed/running from gdb's perspective. */
1748 if (thread->last_resume_kind == resume_stop
183be222 1749 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
a681f9c9
PA
1750 return 1;
1751
1752 return 0;
1753}
1754
/* Predicate used when looking for a thread with an interesting
   pending event: true iff THREAD matches PTID, is resumed, and still
   has a valid pending status.  A stale pending status causes the lwp
   to be quietly re-resumed.  */

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
						 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      /* The pending status went stale; resume the lwp the way it was
	 last resumed and report nothing.  */
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
1778
95954743
PA
1779struct lwp_info *
1780find_lwp_pid (ptid_t ptid)
1781{
d4895ba2
SM
1782 long lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1783 thread_info *thread = find_thread ([lwp] (thread_info *thr_arg)
454296a2 1784 {
da4ae14a 1785 return thr_arg->id.lwp () == lwp;
454296a2 1786 });
d86d4aaf
DE
1787
1788 if (thread == NULL)
1789 return NULL;
1790
9c80ecd6 1791 return get_thread_lwp (thread);
95954743
PA
1792}
1793
fa96cb38 1794/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1795
fa96cb38
PA
1796static int
1797num_lwps (int pid)
1798{
fa96cb38 1799 int count = 0;
0d62e5e8 1800
4d3bb80e
SM
1801 for_each_thread (pid, [&] (thread_info *thread)
1802 {
9c80ecd6 1803 count++;
4d3bb80e 1804 });
3aee8918 1805
fa96cb38
PA
1806 return count;
1807}
d61ddec4 1808
/* See nat/linux-nat.h.  Invoke CALLBACK on each lwp matching FILTER;
   return the first lwp for which CALLBACK returns true, or NULL if
   none does.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
    {
      lwp_info *lwp = get_thread_lwp (thr_arg);

      return callback (lwp);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}
1827
/* Scan all debugged processes for thread-group leaders that have
   turned zombie while other threads in the group still exist.  Such
   leaders cannot be reaped (or even inspected) until all non-leader
   threads are gone, so they are either marked dead (if GDB asked for
   thread-exit events) or deleted outright.  Returns true if a new
   pending (exit) event was recorded for any leader.  */

bool
linux_process_target::check_zombie_leaders ()
{
  bool new_pending_event = false;

  for_each_process ([&] (process_info *proc)
    {
      pid_t leader_pid = pid_of (proc);
      lwp_info *leader_lp = find_lwp_pid (ptid_t (leader_pid));

      threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
			    "num_lwps=%d, zombie=%d",
			    leader_pid, leader_lp != NULL, num_lwps (leader_pid),
			    linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL && !leader_lp->stopped
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  Note this
	     isn't a watertight check.  If the inferior is
	     multi-threaded and is exiting, it may be we see the
	     leader as zombie before we reap all the non-leader
	     threads.  See comments below.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A zombie leader in a multi-threaded program can mean one
	     of three things:

	     #1 - Only the leader exited, not the whole program, e.g.,
	     with pthread_exit.  Since we can't reap the leader's exit
	     status until all other threads are gone and reaped too,
	     we want to delete the zombie leader right away, as it
	     can't be debugged, we can't read its registers, etc.
	     This is the main reason we check for zombie leaders
	     disappearing.

	     #2 - The whole thread-group/process exited (a group exit,
	     via e.g. exit(3), and there is (or will be shortly) an
	     exit reported for each thread in the process, and then
	     finally an exit for the leader once the non-leaders are
	     reaped.

	     #3 - There are 3 or more threads in the group, and a
	     thread other than the leader exec'd.  See comments on
	     exec events at the top of the file.

	     Ideally we would never delete the leader for case #2.
	     Instead, we want to collect the exit status of each
	     non-leader thread, and then finally collect the exit
	     status of the leader as normal and use its exit code as
	     whole-process exit code.  Unfortunately, there's no
	     race-free way to distinguish cases #1 and #2.  We can't
	     assume the exit events for the non-leaders threads are
	     already pending in the kernel, nor can we assume the
	     non-leader threads are in zombie state already.  Between
	     the leader becoming zombie and the non-leaders exiting
	     and becoming zombie themselves, there's a small time
	     window, so such a check would be racy.  Temporarily
	     pausing all threads and checking to see if all threads
	     exit or not before re-resuming them would work in the
	     case that all threads are running right now, but it
	     wouldn't work if some thread is currently already
	     ptrace-stopped, e.g., due to scheduler-locking.

	     So what we do is we delete the leader anyhow, and then
	     later on when we see its exit status, we re-add it back.
	     We also make sure that we only report a whole-process
	     exit when we see the leader exiting, as opposed to when
	     the last LWP in the LWP list exits, which can be a
	     non-leader if we deleted the leader here.  */
	  threads_debug_printf ("Thread group leader %d zombie "
				"(it exited, or another thread execd), "
				"deleting it.",
				leader_pid);

	  thread_info *leader_thread = get_lwp_thread (leader_lp);
	  if (report_exit_events_for (leader_thread))
	    {
	      /* GDB wants to hear about this thread's exit; keep the
		 LWP around with a synthesized exit status pending.  */
	      mark_lwp_dead (leader_lp, W_EXITCODE (0, 0), true);
	      new_pending_event = true;
	    }
	  else
	    delete_lwp (leader_lp);
	}
    });

  return new_pending_event;
}
c3adc08c 1916
a1385b7b
SM
1917/* Callback for `find_thread'. Returns the first LWP that is not
1918 stopped. */
d50171e4 1919
a1385b7b
SM
1920static bool
1921not_stopped_callback (thread_info *thread, ptid_t filter)
fa96cb38 1922{
a1385b7b
SM
1923 if (!thread->id.matches (filter))
1924 return false;
47c0c975 1925
a1385b7b 1926 lwp_info *lwp = get_thread_lwp (thread);
fa96cb38 1927
a1385b7b 1928 return !lwp->stopped;
0d62e5e8 1929}
611cb4a5 1930
863d01bd
PA
1931/* Increment LWP's suspend count. */
1932
1933static void
1934lwp_suspended_inc (struct lwp_info *lwp)
1935{
1936 lwp->suspended++;
1937
c058728c
SM
1938 if (lwp->suspended > 4)
1939 threads_debug_printf
1940 ("LWP %ld has a suspiciously high suspend count, suspended=%d",
1941 lwpid_of (get_lwp_thread (lwp)), lwp->suspended);
863d01bd
PA
1942}
1943
1944/* Decrement LWP's suspend count. */
1945
1946static void
1947lwp_suspended_decr (struct lwp_info *lwp)
1948{
1949 lwp->suspended--;
1950
1951 if (lwp->suspended < 0)
1952 {
1953 struct thread_info *thread = get_lwp_thread (lwp);
1954
f34652de 1955 internal_error ("unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
863d01bd
PA
1956 lwp->suspended);
1957 }
1958}
1959
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  /* Balance the increment above; the count must return to zero.  */
  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads
	      || (lwp->collecting_fast_tracepoint
		  != fast_tpoint_collect_result::not_collecting));

  if (tpoint_related_event)
    {
      threads_debug_printf ("got a tracepoint event");
      return 1;
    }

  return 0;
}
2008
/* Return whether LWP is currently inside a fast tracepoint jump pad,
   filling in *STATUS with the collection state if so.  Returns
   not_collecting when the target provides no thread-area lookup.  */

fast_tpoint_collect_result
linux_process_target::linux_fast_tracepoint_collecting
  (lwp_info *lwp, fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
    return fast_tpoint_collect_result::not_collecting;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}
2025
/* Default implementation: thread-area lookup is not supported by this
   target; always fails with -1.  Architecture backends override this.  */

int
linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  return -1;
}
2031
/* Check whether LWP is stopped inside a fast tracepoint jump pad, and
   if so, arrange for it to be moved out before its stop (WSTAT, may
   be NULL) is reported to GDB.  Returns true if the LWP still needs
   to be moved out of the jump pad (caller should defer the report);
   false otherwise.  May rewrite the LWP's PC/siginfo for synchronous
   signals raised while executing the relocated instruction.  */

bool
linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
{
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;

      threads_debug_printf
	("Checking whether LWP %ld needs to move out of the jump pad.",
	 lwpid_of (current_thread));

      fast_tpoint_collect_result r
	= linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != fast_tpoint_collect_result::not_collecting)
	    {
	      if (r == fast_tpoint_collect_result::before_insn
		  && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      threads_debug_printf
		("Checking whether LWP %ld needs to move out of the jump pad..."
		 " it does", lwpid_of (current_thread));

	      return true;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint
	    = fast_tpoint_collect_result::not_collecting;

	  if (r != fast_tpoint_collect_result::not_collecting
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread, 1);
	      low_set_pc (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      threads_debug_printf
		("Cancelling fast exit-jump-pad: removing bkpt."
		 "stopping all threads momentarily.");

	      /* Pause everyone while the breakpoint is removed, so no
		 other thread can run into it meanwhile.  */
	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  threads_debug_printf
    ("Checking whether LWP %ld needs to move out of the jump pad... no",
     lwpid_of (current_thread));

  return false;
}
2148
/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  Non-RT signals already present in the queue are
   not queued a second time (they would be merged by the kernel
   anyway).  The signal's siginfo is captured via ptrace so it can be
   re-injected faithfully later.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  threads_debug_printf ("Deferring signal %d for LWP %ld.",
			WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      for (const auto &sig : lwp->pending_signals_to_report)
	threads_debug_printf (" Already queued %d", sig.signal);

      threads_debug_printf (" (no more currently queued signals)");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      for (const auto &sig : lwp->pending_signals_to_report)
	{
	  if (sig.signal == WSTOPSIG (*wstat))
	    {
	      threads_debug_printf
		("Not requeuing already queued non-RT signal %d for LWP %ld",
		 sig.signal, lwpid_of (thread));
	      return;
	    }
	}
    }

  lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));

  /* Save the siginfo of the just-appended entry so it can be restored
     with PTRACE_SETSIGINFO when the signal is finally delivered.  */
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &lwp->pending_signals_to_report.back ().info);
}
2190
/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  Stores a synthesized stop status in *WSTAT and
   restores the saved siginfo via ptrace.  Returns 1 if a signal was
   dequeued, 0 if the list was empty.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (!lwp->pending_signals_to_report.empty ())
    {
      const pending_signal &p_sig = lwp->pending_signals_to_report.front ();

      *wstat = W_STOPCODE (p_sig.signal);
      /* Only restore siginfo if one was actually captured at enqueue
	 time (si_signo == 0 means none was saved).  */
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals_to_report.pop_front ();

      threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
			    WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  for (const auto &sig : lwp->pending_signals_to_report)
	    threads_debug_printf (" Still queued %d", sig.signal);

	  threads_debug_printf (" (no more queued signals)");
	}

      return 1;
    }

  return 0;
}
2226
/* Ask the low target whether CHILD stopped because of a watchpoint
   hit; if so, record the stop reason and faulting data address in
   CHILD.  Returns true iff the stop reason is a watchpoint.  The
   current thread is temporarily switched to CHILD's thread for the
   low target queries, and restored on return.  */

bool
linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
{
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (child));

  if (low_stopped_by_watchpoint ())
    {
      child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
      child->stopped_data_address = low_stopped_data_address ();
    }

  return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}
d50171e4 2241
/* Default implementation: targets without watchpoint support never
   report a watchpoint stop.  Architecture backends override this.  */

bool
linux_process_target::low_stopped_by_watchpoint ()
{
  return false;
}
d50171e4 2247
/* Default implementation: no watchpoint data address available.
   Architecture backends override this.  */

CORE_ADDR
linux_process_target::low_stopped_data_address ()
{
  return 0;
}
2253
de0d863e
DB
2254/* Return the ptrace options that we want to try to enable. */
2255
2256static int
2257linux_low_ptrace_options (int attached)
2258{
c12a5089 2259 client_state &cs = get_client_state ();
de0d863e
DB
2260 int options = 0;
2261
2262 if (!attached)
2263 options |= PTRACE_O_EXITKILL;
2264
c12a5089 2265 if (cs.report_fork_events)
de0d863e
DB
2266 options |= PTRACE_O_TRACEFORK;
2267
c12a5089 2268 if (cs.report_vfork_events)
c269dbdb
DB
2269 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2270
c12a5089 2271 if (cs.report_exec_events)
94585166
DB
2272 options |= PTRACE_O_TRACEEXEC;
2273
82075af2
JS
2274 options |= PTRACE_O_TRACESYSGOOD;
2275
de0d863e
DB
2276 return options;
2277}
2278
/* Process one event (LWPID, WSTAT) just pulled out of waitpid.
   Events that should not reach the core (delayed SIGSTOPs, handled
   extended events, exits of non-leader threads, stops used only for
   architecture setup) are consumed here; everything else is left
   pending on the LWP for later reporting.  */

void
linux_process_target::filter_event (int lwpid, int wstat)
{
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (ptid_t (lwpid));

  /* Check for events reported by anything not in our LWP list.  */
  if (child == nullptr)
    {
      if (WIFSTOPPED (wstat))
	{
	  if (WSTOPSIG (wstat) == SIGTRAP
	      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
	    {
	      /* A non-leader thread exec'ed after we've seen the
		 leader zombie, and removed it from our lists (in
		 check_zombie_leaders).  The non-leader thread changes
		 its tid to the tgid.  */
	      threads_debug_printf
		("Re-adding thread group leader LWP %d after exec.",
		 lwpid);

	      child = add_lwp (ptid_t (lwpid, lwpid));
	      child->stopped = 1;
	      switch_to_thread (child->thread);
	    }
	  else
	    {
	      /* A process we are controlling has forked and the new
		 child's stop was reported to us by the kernel.  Save
		 its PID and go back to waiting for the fork event to
		 be reported - the stopped process might be returned
		 from waitpid before or after the fork event is.  */
	      threads_debug_printf
		("Saving LWP %d status %s in stopped_pids list",
		 lwpid, status_to_str (wstat).c_str ());
	      add_to_pid_list (&stopped_pids, lwpid, wstat);
	    }
	}
      else
	{
	  /* Don't report an event for the exit of an LWP not in our
	     list, i.e. not part of any inferior we're debugging.
	     This can happen if we detach from a program we originally
	     forked and then it exits.  However, note that we may have
	     earlier deleted a leader of an inferior we're debugging,
	     in check_zombie_leaders.  Re-add it back here if so.  */
	  find_process ([&] (process_info *proc)
	    {
	      if (proc->pid == lwpid)
		{
		  threads_debug_printf
		    ("Re-adding thread group leader LWP %d after exit.",
		     lwpid);

		  child = add_lwp (ptid_t (lwpid, lwpid));
		  return true;
		}
	      return false;
	    });
	}

      if (child == nullptr)
	return;
    }

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      threads_debug_printf ("%d exited", lwpid);

      if (finish_step_over (child))
	{
	  /* Unsuspend all other LWPs, and set them back running again.  */
	  unsuspend_all_lwps (child);
	}

      /* If this is not the leader LWP, then the exit signal was not
	 the end of the debugged application and should be ignored,
	 unless GDB wants to hear about thread exits.  */
      if (report_exit_events_for (thread) || is_leader (thread))
	{
	  /* Since events are serialized to GDB core, and we can't
	     report this one right now.  Leave the status pending for
	     the next time we're able to report it.  */
	  mark_lwp_dead (child, wstat, false);
	  return;
	}
      else
	{
	  delete_lwp (child);
	  return;
	}
    }

  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return;
	    }
	}
    }

  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Always update syscall_state, even if it will be filtered later.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    {
      /* Syscall stops toggle between entry and return.  */
      child->syscall_state
	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	   ? TARGET_WAITKIND_SYSCALL_RETURN
	   : TARGET_WAITKIND_SYSCALL_ENTRY);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in handle_extended_wait.  */
      child->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Be careful to not overwrite stop_pc until save_stop_reason is
     called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (&child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return;
	}
    }

  if (linux_wstatus_maybe_breakpoint (wstat))
    {
      if (save_stop_reason (child))
	have_stop_pc = 1;
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      threads_debug_printf ("Expected stop.");

      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
				target_pid_to_str (ptid_of (thread)).c_str ());
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
				target_pid_to_str (ptid_of (thread)).c_str ());
	  return;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
				child->stepping ? "step" : "continue",
				target_pid_to_str (ptid_of (thread)).c_str ());

	  resume_one_lwp (child, child->stepping, 0, NULL);
	  return;
	}
    }

  child->status_pending_p = 1;
  child->status_pending = wstat;
  return;
}
2497
b31cdfa6
TBA
2498bool
2499linux_process_target::maybe_hw_step (thread_info *thread)
f79b145d 2500{
b31cdfa6
TBA
2501 if (supports_hardware_single_step ())
2502 return true;
f79b145d
YQ
2503 else
2504 {
3b9a79ef 2505 /* GDBserver must insert single-step breakpoint for software
f79b145d 2506 single step. */
3b9a79ef 2507 gdb_assert (has_single_step_breakpoints (thread));
b31cdfa6 2508 return false;
f79b145d
YQ
2509 }
2510}
2511
/* Re-resume THREAD's LWP if it is stopped but was meant to be running
   (resumed, not suspended, with no pending status to report).  Used
   after all kernel events have been pulled, to put uninteresting LWPs
   back in motion.  */

void
linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->stopped
      && !lp->suspended
      && !lp->status_pending_p
      && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
    {
      int step = 0;

      if (thread->last_resume_kind == resume_step)
	{
	  /* Software single-step targets need their step breakpoints
	     (re)installed before resuming.  */
	  if (supports_software_single_step ())
	    install_software_single_step_breakpoints (lp);

	  step = maybe_hw_step (thread);
	}

      threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    paddress (lp->stop_pc), step);

      resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
    }
}
2539
/* Wait for an event from any LWP matching FILTER_PTID, on behalf of a
   request to wait for WAIT_PTID.  Stores the raw wait status in
   *WSTATP; OPTIONS are waitpid flags (WNOHANG is honored).  Returns
   the LWP id of the event thread (also made current), 0 if WNOHANG
   was set and nothing was found, or -1 if there are no unwaited-for
   resumed LWPs left.  All events are first pulled out of the kernel
   and filtered; the reported one is then picked from the pending
   set.  */

int
linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
					       ptid_t filter_ptid,
					       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
    {
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  threads_debug_printf ("Got a pending child %ld", lwpid_of (event_thread));
	}
    }
  else if (filter_ptid != null_ptid)
    {
      requested_child = find_lwp_pid (filter_ptid);
      gdb_assert (requested_child != nullptr);

      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && (requested_child->collecting_fast_tracepoint
	      != fast_tpoint_collect_result::not_collecting))
	{
	  /* The requested LWP is mid fast-tracepoint collection;
	     defer its pending signal and let it finish the collect
	     before reporting anything.  */
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error ("requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      threads_debug_printf ("Got an event from pending child %ld (%04x)",
			    lwpid_of (event_thread),
			    event_child->status_pending);

      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      switch_to_thread (event_thread);
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
			    ret, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  threads_debug_printf ("waitpid %ld received %s",
				(long) ret, status_to_str (*wstatp).c_str ());

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_thread ([this] (thread_info *thread)
	  {
	    resume_stopped_resumed_lwps (thread);
	  });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      if (check_zombie_leaders ())
	goto retry;

      auto not_stopped = [&] (thread_info *thread)
	{
	  return not_stopped_callback (thread, wait_ptid);
	};

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if (find_thread (not_stopped) == NULL)
	{
	  threads_debug_printf ("exit (no unwaited-for LWP)");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  threads_debug_printf ("WNOHANG set, no event found");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      threads_debug_printf ("sigsuspend'ing");

      sigsuspend (&prev_mask);
      gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  /* Restore the original signal mask before returning.  */
  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);

  switch_to_thread (event_thread);

  return lwpid_of (event_thread);
}
2738
/* Convenience wrapper around wait_for_event_filtered: wait and filter
   on the same PTID.  */

int
linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  return wait_for_event_filtered (ptid, ptid, wstatp, options);
}
2744
/* Select one LWP out of those that have events pending.  On return
   *ORIG_LP points at the chosen LWP (unchanged if none was found).  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  struct thread_info *event_thread = NULL;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it's the LWP that
     the core is most interested in.  If we didn't do this, then we'd
     have to handle pending step SIGTRAPs somehow in case the core
     later continues the previously-stepped thread, otherwise we'd
     report the pending SIGTRAP, and the core, not having stepped the
     thread, wouldn't understand what the trap was for, and therefore
     would report it to the user as a random signal.  */
  if (!non_stop)
    {
      event_thread = find_thread ([] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
		  && thread->last_resume_kind == resume_step
		  && lp->status_pending_p);
	});

      if (event_thread != NULL)
	threads_debug_printf
	  ("Select single-step %s",
	   target_pid_to_str (ptid_of (event_thread)).c_str ());
    }
  if (event_thread == NULL)
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had events.  */

      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  /* Only resumed LWPs that have an event pending.  */
	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
		  && lp->status_pending_p);
	});
    }

  if (event_thread != NULL)
    {
      struct lwp_info *event_lp = get_thread_lwp (event_thread);

      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
2799
7984d532
PA
2800/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2801 NULL. */
2802
2803static void
2804unsuspend_all_lwps (struct lwp_info *except)
2805{
139720c5
SM
2806 for_each_thread ([&] (thread_info *thread)
2807 {
2808 lwp_info *lwp = get_thread_lwp (thread);
2809
2810 if (lwp != except)
2811 lwp_suspended_decr (lwp);
2812 });
7984d532
PA
2813}
2814
5a6b0a41 2815static bool lwp_running (thread_info *thread);
fa593d66
PA
2816
2817/* Stabilize threads (move out of jump pads).
2818
2819 If a thread is midway collecting a fast tracepoint, we need to
2820 finish the collection and move it out of the jump pad before
2821 reporting the signal.
2822
2823 This avoids recursion while collecting (when a signal arrives
2824 midway, and the signal handler itself collects), which would trash
2825 the trace buffer. In case the user set a breakpoint in a signal
2826 handler, this avoids the backtrace showing the jump pad, etc..
2827 Most importantly, there are certain things we can't do safely if
2828 threads are stopped in a jump pad (or in its callee's). For
2829 example:
2830
2831 - starting a new trace run. A thread still collecting the
2832 previous run, could trash the trace buffer when resumed. The trace
2833 buffer control structures would have been reset but the thread had
2834 no way to tell. The thread could even midway memcpy'ing to the
2835 buffer, which would mean that when resumed, it would clobber the
2836 trace buffer that had been set for a new run.
2837
2838 - we can't rewrite/reuse the jump pads for new tracepoints
2839 safely. Say you do tstart while a thread is stopped midway while
2840 collecting. When the thread is later resumed, it finishes the
2841 collection, and returns to the jump pad, to execute the original
2842 instruction that was under the tracepoint jump at the time the
2843 older run had been started. If the jump pad had been rewritten
2844 since for something else in the new run, the thread would now
2845 execute the wrong / random instructions. */
2846
5c9eb2f2
TBA
2847void
2848linux_process_target::stabilize_threads ()
fa593d66 2849{
13e567af
TBA
2850 thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
2851 {
2852 return stuck_in_jump_pad (thread);
2853 });
fa593d66 2854
d86d4aaf 2855 if (thread_stuck != NULL)
fa593d66 2856 {
c058728c
SM
2857 threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
2858 lwpid_of (thread_stuck));
fa593d66
PA
2859 return;
2860 }
2861
24583e45 2862 scoped_restore_current_thread restore_thread;
fa593d66
PA
2863
2864 stabilizing_threads = 1;
2865
2866 /* Kick 'em all. */
d16f3f6c
TBA
2867 for_each_thread ([this] (thread_info *thread)
2868 {
2869 move_out_of_jump_pad (thread);
2870 });
fa593d66
PA
2871
2872 /* Loop until all are stopped out of the jump pads. */
5a6b0a41 2873 while (find_thread (lwp_running) != NULL)
fa593d66
PA
2874 {
2875 struct target_waitstatus ourstatus;
2876 struct lwp_info *lwp;
fa593d66
PA
2877 int wstat;
2878
2879 /* Note that we go through the full wait even loop. While
2880 moving threads out of jump pad, we need to be able to step
2881 over internal breakpoints and such. */
d16f3f6c 2882 wait_1 (minus_one_ptid, &ourstatus, 0);
fa593d66 2883
183be222 2884 if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
fa593d66 2885 {
0bfdf32f 2886 lwp = get_thread_lwp (current_thread);
fa593d66
PA
2887
2888 /* Lock it. */
863d01bd 2889 lwp_suspended_inc (lwp);
fa593d66 2890
183be222 2891 if (ourstatus.sig () != GDB_SIGNAL_0
0bfdf32f 2892 || current_thread->last_resume_kind == resume_stop)
fa593d66 2893 {
183be222 2894 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
fa593d66
PA
2895 enqueue_one_deferred_signal (lwp, &wstat);
2896 }
2897 }
2898 }
2899
fcdad592 2900 unsuspend_all_lwps (NULL);
fa593d66
PA
2901
2902 stabilizing_threads = 0;
2903
b4d51a55 2904 if (debug_threads)
fa593d66 2905 {
13e567af
TBA
2906 thread_stuck = find_thread ([this] (thread_info *thread)
2907 {
2908 return stuck_in_jump_pad (thread);
2909 });
fcb056a5 2910
d86d4aaf 2911 if (thread_stuck != NULL)
c058728c
SM
2912 threads_debug_printf
2913 ("couldn't stabilize, LWP %ld got stuck in jump pad",
2914 lwpid_of (thread_stuck));
fa593d66
PA
2915 }
2916}
2917
582511be
PA
2918/* Convenience function that is called when the kernel reports an
2919 event that is not passed out to GDB. */
2920
2921static ptid_t
2922ignore_event (struct target_waitstatus *ourstatus)
2923{
2924 /* If we got an event, there may still be others, as a single
2925 SIGCHLD can indicate more than one child stopped. This forces
2926 another target_wait call. */
2927 async_file_mark ();
2928
183be222 2929 ourstatus->set_ignore ();
582511be
PA
2930 return null_ptid;
2931}
2932
fd000fb3
TBA
2933ptid_t
2934linux_process_target::filter_exit_event (lwp_info *event_child,
2935 target_waitstatus *ourstatus)
65706a29
PA
2936{
2937 struct thread_info *thread = get_lwp_thread (event_child);
2938 ptid_t ptid = ptid_of (thread);
2939
e8a625d1
PA
2940 if (ourstatus->kind () == TARGET_WAITKIND_THREAD_EXITED)
2941 {
2942 /* We're reporting a thread exit for the leader. The exit was
2943 detected by check_zombie_leaders. */
2944 gdb_assert (is_leader (thread));
2945 gdb_assert (report_exit_events_for (thread));
2946
2947 delete_lwp (event_child);
2948 return ptid;
2949 }
2950
48989498
PA
2951 /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
2952 if a non-leader thread exits with a signal, we'd report it to the
2953 core which would interpret it as the whole-process exiting.
2954 There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind. */
2955 if (ourstatus->kind () != TARGET_WAITKIND_EXITED
2956 && ourstatus->kind () != TARGET_WAITKIND_SIGNALLED)
2957 return ptid;
2958
8a841a35 2959 if (!is_leader (thread))
65706a29 2960 {
48989498 2961 if (report_exit_events_for (thread))
183be222 2962 ourstatus->set_thread_exited (0);
65706a29 2963 else
183be222 2964 ourstatus->set_ignore ();
65706a29
PA
2965
2966 delete_lwp (event_child);
2967 }
2968 return ptid;
2969}
2970
82075af2
JS
2971/* Returns 1 if GDB is interested in any event_child syscalls. */
2972
2973static int
2974gdb_catching_syscalls_p (struct lwp_info *event_child)
2975{
2976 struct thread_info *thread = get_lwp_thread (event_child);
2977 struct process_info *proc = get_thread_process (thread);
2978
f27866ba 2979 return !proc->syscalls_to_catch.empty ();
82075af2
JS
2980}
2981
9eedd27d
TBA
2982bool
2983linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
82075af2 2984{
4cc32bec 2985 int sysno;
82075af2
JS
2986 struct thread_info *thread = get_lwp_thread (event_child);
2987 struct process_info *proc = get_thread_process (thread);
2988
f27866ba 2989 if (proc->syscalls_to_catch.empty ())
9eedd27d 2990 return false;
82075af2 2991
f27866ba 2992 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
9eedd27d 2993 return true;
82075af2 2994
4cc32bec 2995 get_syscall_trapinfo (event_child, &sysno);
f27866ba
SM
2996
2997 for (int iter : proc->syscalls_to_catch)
82075af2 2998 if (iter == sysno)
9eedd27d 2999 return true;
82075af2 3000
9eedd27d 3001 return false;
82075af2
JS
3002}
3003
d16f3f6c
TBA
3004ptid_t
3005linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
b60cea74 3006 target_wait_flags target_options)
da6d8c04 3007{
c058728c
SM
3008 THREADS_SCOPED_DEBUG_ENTER_EXIT;
3009
c12a5089 3010 client_state &cs = get_client_state ();
e5f1222d 3011 int w;
fc7238bb 3012 struct lwp_info *event_child;
bd99dc85 3013 int options;
bd99dc85 3014 int pid;
6bf5e0ba
PA
3015 int step_over_finished;
3016 int bp_explains_trap;
3017 int maybe_internal_trap;
3018 int report_to_gdb;
219f2f23 3019 int trace_event;
c2d6af84 3020 int in_step_range;
bd99dc85 3021
c058728c 3022 threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());
87ce2a04 3023
bd99dc85
PA
3024 /* Translate generic target options into linux options. */
3025 options = __WALL;
3026 if (target_options & TARGET_WNOHANG)
3027 options |= WNOHANG;
0d62e5e8 3028
fa593d66
PA
3029 bp_explains_trap = 0;
3030 trace_event = 0;
c2d6af84 3031 in_step_range = 0;
183be222 3032 ourstatus->set_ignore ();
bd99dc85 3033
ef980d65 3034 bool was_any_resumed = any_resumed ();
f2faf941 3035
d7e15655 3036 if (step_over_bkpt == null_ptid)
d16f3f6c 3037 pid = wait_for_event (ptid, &w, options);
6bf5e0ba
PA
3038 else
3039 {
c058728c
SM
3040 threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
3041 target_pid_to_str (step_over_bkpt).c_str ());
d16f3f6c 3042 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
6bf5e0ba
PA
3043 }
3044
ef980d65 3045 if (pid == 0 || (pid == -1 && !was_any_resumed))
87ce2a04 3046 {
fa96cb38
PA
3047 gdb_assert (target_options & TARGET_WNOHANG);
3048
c058728c 3049 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");
fa96cb38 3050
183be222 3051 ourstatus->set_ignore ();
87ce2a04
DE
3052 return null_ptid;
3053 }
fa96cb38
PA
3054 else if (pid == -1)
3055 {
c058728c 3056 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");
bd99dc85 3057
183be222 3058 ourstatus->set_no_resumed ();
fa96cb38
PA
3059 return null_ptid;
3060 }
0d62e5e8 3061
0bfdf32f 3062 event_child = get_thread_lwp (current_thread);
0d62e5e8 3063
d16f3f6c 3064 /* wait_for_event only returns an exit status for the last
fa96cb38
PA
3065 child of a process. Report it. */
3066 if (WIFEXITED (w) || WIFSIGNALED (w))
da6d8c04 3067 {
fa96cb38 3068 if (WIFEXITED (w))
0d62e5e8 3069 {
e8a625d1
PA
3070 /* If we already have the exit recorded in waitstatus, use
3071 it. This will happen when we detect a zombie leader,
3072 when we had GDB_THREAD_OPTION_EXIT enabled for it. We
3073 want to report its exit as TARGET_WAITKIND_THREAD_EXITED,
3074 as the whole process hasn't exited yet. */
3075 const target_waitstatus &ws = event_child->waitstatus;
3076 if (ws.kind () != TARGET_WAITKIND_IGNORE)
3077 {
3078 gdb_assert (ws.kind () == TARGET_WAITKIND_EXITED
3079 || ws.kind () == TARGET_WAITKIND_THREAD_EXITED);
3080 *ourstatus = ws;
3081 }
3082 else
3083 ourstatus->set_exited (WEXITSTATUS (w));
bd99dc85 3084
c058728c
SM
3085 threads_debug_printf
3086 ("ret = %s, exited with retcode %d",
3087 target_pid_to_str (ptid_of (current_thread)).c_str (),
3088 WEXITSTATUS (w));
fa96cb38
PA
3089 }
3090 else
3091 {
183be222 3092 ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));
5b1c542e 3093
c058728c
SM
3094 threads_debug_printf
3095 ("ret = %s, terminated with signal %d",
3096 target_pid_to_str (ptid_of (current_thread)).c_str (),
3097 WTERMSIG (w));
0d62e5e8 3098 }
fa96cb38 3099
48989498 3100 return filter_exit_event (event_child, ourstatus);
da6d8c04
DJ
3101 }
3102
2d97cd35
AT
3103 /* If step-over executes a breakpoint instruction, in the case of a
3104 hardware single step it means a gdb/gdbserver breakpoint had been
3105 planted on top of a permanent breakpoint, in the case of a software
3106 single step it may just mean that gdbserver hit the reinsert breakpoint.
e7ad2f14 3107 The PC has been adjusted by save_stop_reason to point at
2d97cd35
AT
3108 the breakpoint address.
3109 So in the case of the hardware single step advance the PC manually
3110 past the breakpoint and in the case of software single step advance only
3b9a79ef 3111 if it's not the single_step_breakpoint we are hitting.
2d97cd35
AT
3112 This avoids that a program would keep trapping a permanent breakpoint
3113 forever. */
d7e15655 3114 if (step_over_bkpt != null_ptid
2d97cd35
AT
3115 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3116 && (event_child->stepping
3b9a79ef 3117 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
8090aef2 3118 {
dd373349
AT
3119 int increment_pc = 0;
3120 int breakpoint_kind = 0;
3121 CORE_ADDR stop_pc = event_child->stop_pc;
3122
d16f3f6c
TBA
3123 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3124 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
8090aef2 3125
c058728c
SM
3126 threads_debug_printf
3127 ("step-over for %s executed software breakpoint",
3128 target_pid_to_str (ptid_of (current_thread)).c_str ());
8090aef2
PA
3129
3130 if (increment_pc != 0)
3131 {
3132 struct regcache *regcache
3133 = get_thread_regcache (current_thread, 1);
3134
3135 event_child->stop_pc += increment_pc;
bf9ae9d8 3136 low_set_pc (regcache, event_child->stop_pc);
8090aef2 3137
d7146cda 3138 if (!low_breakpoint_at (event_child->stop_pc))
15c66dd6 3139 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
8090aef2
PA
3140 }
3141 }
3142
6bf5e0ba
PA
3143 /* If this event was not handled before, and is not a SIGTRAP, we
3144 report it. SIGILL and SIGSEGV are also treated as traps in case
3145 a breakpoint is inserted at the current PC. If this target does
3146 not support internal breakpoints at all, we also report the
3147 SIGTRAP without further processing; it's of no concern to us. */
3148 maybe_internal_trap
bf9ae9d8 3149 = (low_supports_breakpoints ()
6bf5e0ba
PA
3150 && (WSTOPSIG (w) == SIGTRAP
3151 || ((WSTOPSIG (w) == SIGILL
3152 || WSTOPSIG (w) == SIGSEGV)
d7146cda 3153 && low_breakpoint_at (event_child->stop_pc))));
6bf5e0ba
PA
3154
3155 if (maybe_internal_trap)
3156 {
3157 /* Handle anything that requires bookkeeping before deciding to
3158 report the event or continue waiting. */
3159
3160 /* First check if we can explain the SIGTRAP with an internal
3161 breakpoint, or if we should possibly report the event to GDB.
3162 Do this before anything that may remove or insert a
3163 breakpoint. */
3164 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3165
3166 /* We have a SIGTRAP, possibly a step-over dance has just
3167 finished. If so, tweak the state machine accordingly,
3b9a79ef
YQ
3168 reinsert breakpoints and delete any single-step
3169 breakpoints. */
6bf5e0ba
PA
3170 step_over_finished = finish_step_over (event_child);
3171
3172 /* Now invoke the callbacks of any internal breakpoints there. */
3173 check_breakpoints (event_child->stop_pc);
3174
219f2f23
PA
3175 /* Handle tracepoint data collecting. This may overflow the
3176 trace buffer, and cause a tracing stop, removing
3177 breakpoints. */
3178 trace_event = handle_tracepoints (event_child);
3179
6bf5e0ba 3180 if (bp_explains_trap)
c058728c 3181 threads_debug_printf ("Hit a gdbserver breakpoint.");
6bf5e0ba
PA
3182 }
3183 else
3184 {
3185 /* We have some other signal, possibly a step-over dance was in
3186 progress, and it should be cancelled too. */
3187 step_over_finished = finish_step_over (event_child);
fa593d66
PA
3188 }
3189
3190 /* We have all the data we need. Either report the event to GDB, or
3191 resume threads and keep waiting for more. */
3192
3193 /* If we're collecting a fast tracepoint, finish the collection and
3194 move out of the jump pad before delivering a signal. See
3195 linux_stabilize_threads. */
3196
3197 if (WIFSTOPPED (w)
3198 && WSTOPSIG (w) != SIGTRAP
3199 && supports_fast_tracepoints ()
58b4daa5 3200 && agent_loaded_p ())
fa593d66 3201 {
c058728c
SM
3202 threads_debug_printf ("Got signal %d for LWP %ld. Check if we need "
3203 "to defer or adjust it.",
3204 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
3205
3206 /* Allow debugging the jump pad itself. */
0bfdf32f 3207 if (current_thread->last_resume_kind != resume_step
fa593d66
PA
3208 && maybe_move_out_of_jump_pad (event_child, &w))
3209 {
3210 enqueue_one_deferred_signal (event_child, &w);
3211
c058728c
SM
3212 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3213 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66 3214
df95181f 3215 resume_one_lwp (event_child, 0, 0, NULL);
582511be
PA
3216
3217 return ignore_event (ourstatus);
fa593d66
PA
3218 }
3219 }
219f2f23 3220
229d26fc
SM
3221 if (event_child->collecting_fast_tracepoint
3222 != fast_tpoint_collect_result::not_collecting)
fa593d66 3223 {
c058728c
SM
3224 threads_debug_printf
3225 ("LWP %ld was trying to move out of the jump pad (%d). "
3226 "Check if we're already there.",
3227 lwpid_of (current_thread),
3228 (int) event_child->collecting_fast_tracepoint);
fa593d66
PA
3229
3230 trace_event = 1;
3231
3232 event_child->collecting_fast_tracepoint
3233 = linux_fast_tracepoint_collecting (event_child, NULL);
3234
229d26fc
SM
3235 if (event_child->collecting_fast_tracepoint
3236 != fast_tpoint_collect_result::before_insn)
fa593d66
PA
3237 {
3238 /* No longer need this breakpoint. */
3239 if (event_child->exit_jump_pad_bkpt != NULL)
3240 {
c058728c
SM
3241 threads_debug_printf
3242 ("No longer need exit-jump-pad bkpt; removing it."
3243 "stopping all threads momentarily.");
fa593d66
PA
3244
3245 /* Other running threads could hit this breakpoint.
3246 We don't handle moribund locations like GDB does,
3247 instead we always pause all threads when removing
3248 breakpoints, so that any step-over or
3249 decr_pc_after_break adjustment is always taken
3250 care of while the breakpoint is still
3251 inserted. */
3252 stop_all_lwps (1, event_child);
fa593d66
PA
3253
3254 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3255 event_child->exit_jump_pad_bkpt = NULL;
3256
3257 unstop_all_lwps (1, event_child);
3258
3259 gdb_assert (event_child->suspended >= 0);
3260 }
3261 }
3262
229d26fc
SM
3263 if (event_child->collecting_fast_tracepoint
3264 == fast_tpoint_collect_result::not_collecting)
fa593d66 3265 {
c058728c
SM
3266 threads_debug_printf
3267 ("fast tracepoint finished collecting successfully.");
fa593d66
PA
3268
3269 /* We may have a deferred signal to report. */
3270 if (dequeue_one_deferred_signal (event_child, &w))
c058728c 3271 threads_debug_printf ("dequeued one signal.");
3c11dd79 3272 else
fa593d66 3273 {
c058728c 3274 threads_debug_printf ("no deferred signals.");
fa593d66
PA
3275
3276 if (stabilizing_threads)
3277 {
183be222 3278 ourstatus->set_stopped (GDB_SIGNAL_0);
87ce2a04 3279
c058728c
SM
3280 threads_debug_printf
3281 ("ret = %s, stopped while stabilizing threads",
3282 target_pid_to_str (ptid_of (current_thread)).c_str ());
87ce2a04 3283
0bfdf32f 3284 return ptid_of (current_thread);
fa593d66
PA
3285 }
3286 }
3287 }
6bf5e0ba
PA
3288 }
3289
e471f25b
PA
3290 /* Check whether GDB would be interested in this event. */
3291
82075af2
JS
3292 /* Check if GDB is interested in this syscall. */
3293 if (WIFSTOPPED (w)
3294 && WSTOPSIG (w) == SYSCALL_SIGTRAP
9eedd27d 3295 && !gdb_catch_this_syscall (event_child))
82075af2 3296 {
c058728c
SM
3297 threads_debug_printf ("Ignored syscall for LWP %ld.",
3298 lwpid_of (current_thread));
82075af2 3299
df95181f 3300 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
edeeb602 3301
82075af2
JS
3302 return ignore_event (ourstatus);
3303 }
3304
e471f25b
PA
3305 /* If GDB is not interested in this signal, don't stop other
3306 threads, and don't report it to GDB. Just resume the inferior
3307 right away. We do this for threading-related signals as well as
3308 any that GDB specifically requested we ignore. But never ignore
3309 SIGSTOP if we sent it ourselves, and do not ignore signals when
3310 stepping - they may require special handling to skip the signal
c9587f88
AT
3311 handler. Also never ignore signals that could be caused by a
3312 breakpoint. */
e471f25b 3313 if (WIFSTOPPED (w)
0bfdf32f 3314 && current_thread->last_resume_kind != resume_step
e471f25b 3315 && (
1a981360 3316#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
fe978cb0 3317 (current_process ()->priv->thread_db != NULL
e471f25b
PA
3318 && (WSTOPSIG (w) == __SIGRTMIN
3319 || WSTOPSIG (w) == __SIGRTMIN + 1))
3320 ||
3321#endif
c12a5089 3322 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
e471f25b 3323 && !(WSTOPSIG (w) == SIGSTOP
c9587f88
AT
3324 && current_thread->last_resume_kind == resume_stop)
3325 && !linux_wstatus_maybe_breakpoint (w))))
e471f25b
PA
3326 {
3327 siginfo_t info, *info_p;
3328
c058728c
SM
3329 threads_debug_printf ("Ignored signal %d for LWP %ld.",
3330 WSTOPSIG (w), lwpid_of (current_thread));
e471f25b 3331
0bfdf32f 3332 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 3333 (PTRACE_TYPE_ARG3) 0, &info) == 0)
e471f25b
PA
3334 info_p = &info;
3335 else
3336 info_p = NULL;
863d01bd
PA
3337
3338 if (step_over_finished)
3339 {
3340 /* We cancelled this thread's step-over above. We still
3341 need to unsuspend all other LWPs, and set them back
3342 running again while the signal handler runs. */
3343 unsuspend_all_lwps (event_child);
3344
3345 /* Enqueue the pending signal info so that proceed_all_lwps
3346 doesn't lose it. */
3347 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3348
3349 proceed_all_lwps ();
3350 }
3351 else
3352 {
df95181f
TBA
3353 resume_one_lwp (event_child, event_child->stepping,
3354 WSTOPSIG (w), info_p);
863d01bd 3355 }
edeeb602 3356
582511be 3357 return ignore_event (ourstatus);
e471f25b
PA
3358 }
3359
c2d6af84
PA
3360 /* Note that all addresses are always "out of the step range" when
3361 there's no range to begin with. */
3362 in_step_range = lwp_in_step_range (event_child);
3363
3364 /* If GDB wanted this thread to single step, and the thread is out
3365 of the step range, we always want to report the SIGTRAP, and let
3366 GDB handle it. Watchpoints should always be reported. So should
3367 signals we can't explain. A SIGTRAP we can't explain could be a
3368 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3369 do, we're be able to handle GDB breakpoints on top of internal
3370 breakpoints, by handling the internal breakpoint and still
3371 reporting the event to GDB. If we don't, we're out of luck, GDB
863d01bd
PA
3372 won't see the breakpoint hit. If we see a single-step event but
3373 the thread should be continuing, don't pass the trap to gdb.
3374 That indicates that we had previously finished a single-step but
3375 left the single-step pending -- see
3376 complete_ongoing_step_over. */
6bf5e0ba 3377 report_to_gdb = (!maybe_internal_trap
0bfdf32f 3378 || (current_thread->last_resume_kind == resume_step
c2d6af84 3379 && !in_step_range)
15c66dd6 3380 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
863d01bd
PA
3381 || (!in_step_range
3382 && !bp_explains_trap
3383 && !trace_event
3384 && !step_over_finished
3385 && !(current_thread->last_resume_kind == resume_continue
3386 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
9f3a5c85 3387 || (gdb_breakpoint_here (event_child->stop_pc)
d3ce09f5 3388 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
de0d863e 3389 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
183be222 3390 || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);
d3ce09f5
SS
3391
3392 run_breakpoint_commands (event_child->stop_pc);
6bf5e0ba
PA
3393
3394 /* We found no reason GDB would want us to stop. We either hit one
3395 of our own breakpoints, or finished an internal step GDB
3396 shouldn't know about. */
3397 if (!report_to_gdb)
3398 {
c058728c
SM
3399 if (bp_explains_trap)
3400 threads_debug_printf ("Hit a gdbserver breakpoint.");
3401
3402 if (step_over_finished)
3403 threads_debug_printf ("Step-over finished.");
3404
3405 if (trace_event)
3406 threads_debug_printf ("Tracepoint event.");
3407
3408 if (lwp_in_step_range (event_child))
3409 threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
3410 paddress (event_child->stop_pc),
3411 paddress (event_child->step_range_start),
3412 paddress (event_child->step_range_end));
6bf5e0ba
PA
3413
3414 /* We're not reporting this breakpoint to GDB, so apply the
3415 decr_pc_after_break adjustment to the inferior's regcache
3416 ourselves. */
3417
bf9ae9d8 3418 if (low_supports_breakpoints ())
6bf5e0ba
PA
3419 {
3420 struct regcache *regcache
0bfdf32f 3421 = get_thread_regcache (current_thread, 1);
bf9ae9d8 3422 low_set_pc (regcache, event_child->stop_pc);
6bf5e0ba
PA
3423 }
3424
7984d532 3425 if (step_over_finished)
e3652c84
YQ
3426 {
3427 /* If we have finished stepping over a breakpoint, we've
3428 stopped and suspended all LWPs momentarily except the
3429 stepping one. This is where we resume them all again.
3430 We're going to keep waiting, so use proceed, which
3431 handles stepping over the next breakpoint. */
3432 unsuspend_all_lwps (event_child);
3433 }
3434 else
3435 {
3436 /* Remove the single-step breakpoints if any. Note that
3437 there isn't single-step breakpoint if we finished stepping
3438 over. */
7582c77c 3439 if (supports_software_single_step ()
e3652c84
YQ
3440 && has_single_step_breakpoints (current_thread))
3441 {
3442 stop_all_lwps (0, event_child);
3443 delete_single_step_breakpoints (current_thread);
3444 unstop_all_lwps (0, event_child);
3445 }
3446 }
7984d532 3447
c058728c 3448 threads_debug_printf ("proceeding all threads.");
edeeb602 3449
c058728c 3450 proceed_all_lwps ();
edeeb602 3451
582511be 3452 return ignore_event (ourstatus);
6bf5e0ba
PA
3453 }
3454
c058728c
SM
3455 if (debug_threads)
3456 {
3457 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3458 threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
3459 lwpid_of (get_lwp_thread (event_child)),
3460 event_child->waitstatus.to_string ().c_str ());
3461
3462 if (current_thread->last_resume_kind == resume_step)
3463 {
3464 if (event_child->step_range_start == event_child->step_range_end)
3465 threads_debug_printf
3466 ("GDB wanted to single-step, reporting event.");
3467 else if (!lwp_in_step_range (event_child))
3468 threads_debug_printf ("Out of step range, reporting event.");
3469 }
3470
3471 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3472 threads_debug_printf ("Stopped by watchpoint.");
3473 else if (gdb_breakpoint_here (event_child->stop_pc))
3474 threads_debug_printf ("Stopped by GDB breakpoint.");
3475 }
3476
3477 threads_debug_printf ("Hit a non-gdbserver trap event.");
6bf5e0ba
PA
3478
3479 /* Alright, we're going to report a stop. */
3480
3b9a79ef 3481 /* Remove single-step breakpoints. */
7582c77c 3482 if (supports_software_single_step ())
8901d193 3483 {
3b9a79ef 3484 /* Remove single-step breakpoints or not. It it is true, stop all
8901d193
YQ
3485 lwps, so that other threads won't hit the breakpoint in the
3486 staled memory. */
3b9a79ef 3487 int remove_single_step_breakpoints_p = 0;
8901d193
YQ
3488
3489 if (non_stop)
3490 {
3b9a79ef
YQ
3491 remove_single_step_breakpoints_p
3492 = has_single_step_breakpoints (current_thread);
8901d193
YQ
3493 }
3494 else
3495 {
3496 /* In all-stop, a stop reply cancels all previous resume
3b9a79ef 3497 requests. Delete all single-step breakpoints. */
8901d193 3498
9c80ecd6
SM
3499 find_thread ([&] (thread_info *thread) {
3500 if (has_single_step_breakpoints (thread))
3501 {
3502 remove_single_step_breakpoints_p = 1;
3503 return true;
3504 }
8901d193 3505
9c80ecd6
SM
3506 return false;
3507 });
8901d193
YQ
3508 }
3509
3b9a79ef 3510 if (remove_single_step_breakpoints_p)
8901d193 3511 {
3b9a79ef 3512 /* If we remove single-step breakpoints from memory, stop all lwps,
8901d193
YQ
3513 so that other threads won't hit the breakpoint in the staled
3514 memory. */
3515 stop_all_lwps (0, event_child);
3516
3517 if (non_stop)
3518 {
3b9a79ef
YQ
3519 gdb_assert (has_single_step_breakpoints (current_thread));
3520 delete_single_step_breakpoints (current_thread);
8901d193
YQ
3521 }
3522 else
3523 {
9c80ecd6
SM
3524 for_each_thread ([] (thread_info *thread){
3525 if (has_single_step_breakpoints (thread))
3526 delete_single_step_breakpoints (thread);
3527 });
8901d193
YQ
3528 }
3529
3530 unstop_all_lwps (0, event_child);
3531 }
3532 }
3533
582511be 3534 if (!stabilizing_threads)
6bf5e0ba
PA
3535 {
3536 /* In all-stop, stop all threads. */
582511be
PA
3537 if (!non_stop)
3538 stop_all_lwps (0, NULL);
6bf5e0ba 3539
c03e6ccc 3540 if (step_over_finished)
582511be
PA
3541 {
3542 if (!non_stop)
3543 {
3544 /* If we were doing a step-over, all other threads but
3545 the stepping one had been paused in start_step_over,
3546 with their suspend counts incremented. We don't want
3547 to do a full unstop/unpause, because we're in
3548 all-stop mode (so we want threads stopped), but we
3549 still need to unsuspend the other threads, to
3550 decrement their `suspended' count back. */
3551 unsuspend_all_lwps (event_child);
3552 }
3553 else
3554 {
3555 /* If we just finished a step-over, then all threads had
3556 been momentarily paused. In all-stop, that's fine,
3557 we want threads stopped by now anyway. In non-stop,
3558 we need to re-resume threads that GDB wanted to be
3559 running. */
3560 unstop_all_lwps (1, event_child);
3561 }
3562 }
c03e6ccc 3563
3aa5cfa0
AT
3564 /* If we're not waiting for a specific LWP, choose an event LWP
3565 from among those that have had events. Giving equal priority
3566 to all LWPs that have had events helps prevent
3567 starvation. */
d7e15655 3568 if (ptid == minus_one_ptid)
3aa5cfa0
AT
3569 {
3570 event_child->status_pending_p = 1;
3571 event_child->status_pending = w;
3572
3573 select_event_lwp (&event_child);
3574
3575 /* current_thread and event_child must stay in sync. */
24583e45 3576 switch_to_thread (get_lwp_thread (event_child));
3aa5cfa0
AT
3577
3578 event_child->status_pending_p = 0;
3579 w = event_child->status_pending;
3580 }
3581
3582
fa593d66 3583 /* Stabilize threads (move out of jump pads). */
582511be 3584 if (!non_stop)
5c9eb2f2 3585 target_stabilize_threads ();
6bf5e0ba
PA
3586 }
3587 else
3588 {
3589 /* If we just finished a step-over, then all threads had been
3590 momentarily paused. In all-stop, that's fine, we want
3591 threads stopped by now anyway. In non-stop, we need to
3592 re-resume threads that GDB wanted to be running. */
3593 if (step_over_finished)
7984d532 3594 unstop_all_lwps (1, event_child);
6bf5e0ba
PA
3595 }
3596
e88cf517
SM
3597 /* At this point, we haven't set OURSTATUS. This is where we do it. */
3598 gdb_assert (ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3599
183be222 3600 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
de0d863e 3601 {
393a6b59
PA
3602 /* If the reported event is an exit, fork, vfork, clone or exec,
3603 let GDB know. */
5a04c4cf 3604
393a6b59
PA
3605 /* Break the unreported fork/vfork/clone relationship chain. */
3606 if (is_new_child_status (event_child->waitstatus.kind ()))
5a04c4cf 3607 {
393a6b59
PA
3608 event_child->relative->relative = NULL;
3609 event_child->relative = NULL;
5a04c4cf
PA
3610 }
3611
00db26fa 3612 *ourstatus = event_child->waitstatus;
de0d863e 3613 /* Clear the event lwp's waitstatus since we handled it already. */
183be222 3614 event_child->waitstatus.set_ignore ();
de0d863e
DB
3615 }
3616 else
183be222 3617 {
e88cf517 3618 /* The LWP stopped due to a plain signal or a syscall signal. Either way,
3bfdcabb 3619 event_child->waitstatus wasn't filled in with the details, so look at
e88cf517
SM
3620 the wait status W. */
3621 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3622 {
3623 int syscall_number;
3624
3625 get_syscall_trapinfo (event_child, &syscall_number);
3626 if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
3627 ourstatus->set_syscall_entry (syscall_number);
3628 else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
3629 ourstatus->set_syscall_return (syscall_number);
3630 else
3631 gdb_assert_not_reached ("unexpected syscall state");
3632 }
3633 else if (current_thread->last_resume_kind == resume_stop
3634 && WSTOPSIG (w) == SIGSTOP)
3635 {
3636 /* A thread that has been requested to stop by GDB with vCont;t,
3637 and it stopped cleanly, so report as SIG0. The use of
3638 SIGSTOP is an implementation detail. */
3639 ourstatus->set_stopped (GDB_SIGNAL_0);
3640 }
3641 else
3642 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
183be222 3643 }
5b1c542e 3644
582511be 3645 /* Now that we've selected our final event LWP, un-adjust its PC if
3e572f71
PA
3646 it was a software breakpoint, and the client doesn't know we can
3647 adjust the breakpoint ourselves. */
3648 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
c12a5089 3649 && !cs.swbreak_feature)
582511be 3650 {
d4807ea2 3651 int decr_pc = low_decr_pc_after_break ();
582511be
PA
3652
3653 if (decr_pc != 0)
3654 {
3655 struct regcache *regcache
3656 = get_thread_regcache (current_thread, 1);
bf9ae9d8 3657 low_set_pc (regcache, event_child->stop_pc + decr_pc);
582511be
PA
3658 }
3659 }
3660
d7e15655 3661 gdb_assert (step_over_bkpt == null_ptid);
d50171e4 3662
e48359ea 3663 threads_debug_printf ("ret = %s, %s",
c058728c 3664 target_pid_to_str (ptid_of (current_thread)).c_str (),
e48359ea 3665 ourstatus->to_string ().c_str ());
bd99dc85 3666
48989498 3667 return filter_exit_event (event_child, ourstatus);
bd99dc85
PA
3668}
3669
3670/* Get rid of any pending event in the pipe. */
3671static void
3672async_file_flush (void)
3673{
cdc8e9b2 3674 linux_event_pipe.flush ();
bd99dc85
PA
3675}
3676
3677/* Put something in the pipe, so the event loop wakes up. */
3678static void
3679async_file_mark (void)
3680{
cdc8e9b2 3681 linux_event_pipe.mark ();
bd99dc85
PA
3682}
3683
6532e7e3
TBA
3684ptid_t
3685linux_process_target::wait (ptid_t ptid,
3686 target_waitstatus *ourstatus,
b60cea74 3687 target_wait_flags target_options)
bd99dc85 3688{
95954743 3689 ptid_t event_ptid;
bd99dc85 3690
bd99dc85
PA
3691 /* Flush the async file first. */
3692 if (target_is_async_p ())
3693 async_file_flush ();
3694
582511be
PA
3695 do
3696 {
d16f3f6c 3697 event_ptid = wait_1 (ptid, ourstatus, target_options);
582511be
PA
3698 }
3699 while ((target_options & TARGET_WNOHANG) == 0
183be222 3700 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3701
3702 /* If at least one stop was reported, there may be more. A single
3703 SIGCHLD can signal more than one child stop. */
3704 if (target_is_async_p ()
3705 && (target_options & TARGET_WNOHANG) != 0
d7e15655 3706 && event_ptid != null_ptid)
bd99dc85
PA
3707 async_file_mark ();
3708
3709 return event_ptid;
da6d8c04
DJ
3710}
3711
c5f62d5f 3712/* Send a signal to an LWP. */
fd500816
DJ
3713
3714static int
a1928bad 3715kill_lwp (unsigned long lwpid, int signo)
fd500816 3716{
4a6ed09b 3717 int ret;
fd500816 3718
4a6ed09b
PA
3719 errno = 0;
3720 ret = syscall (__NR_tkill, lwpid, signo);
3721 if (errno == ENOSYS)
3722 {
3723 /* If tkill fails, then we are not using nptl threads, a
3724 configuration we no longer support. */
3725 perror_with_name (("tkill"));
3726 }
3727 return ret;
fd500816
DJ
3728}
3729
964e4306
PA
3730void
3731linux_stop_lwp (struct lwp_info *lwp)
3732{
3733 send_sigstop (lwp);
3734}
3735
0d62e5e8 3736static void
02fc4de7 3737send_sigstop (struct lwp_info *lwp)
0d62e5e8 3738{
bd99dc85 3739 int pid;
0d62e5e8 3740
d86d4aaf 3741 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3742
0d62e5e8
DJ
3743 /* If we already have a pending stop signal for this process, don't
3744 send another. */
54a0b537 3745 if (lwp->stop_expected)
0d62e5e8 3746 {
c058728c 3747 threads_debug_printf ("Have pending sigstop for lwp %d", pid);
ae13219e 3748
0d62e5e8
DJ
3749 return;
3750 }
3751
c058728c 3752 threads_debug_printf ("Sending sigstop to lwp %d", pid);
0d62e5e8 3753
d50171e4 3754 lwp->stop_expected = 1;
bd99dc85 3755 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3756}
3757
df3e4dbe
SM
3758static void
3759send_sigstop (thread_info *thread, lwp_info *except)
02fc4de7 3760{
d86d4aaf 3761 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3762
7984d532
PA
3763 /* Ignore EXCEPT. */
3764 if (lwp == except)
df3e4dbe 3765 return;
7984d532 3766
02fc4de7 3767 if (lwp->stopped)
df3e4dbe 3768 return;
02fc4de7
PA
3769
3770 send_sigstop (lwp);
7984d532
PA
3771}
3772
3773/* Increment the suspend count of an LWP, and stop it, if not stopped
3774 yet. */
df3e4dbe
SM
3775static void
3776suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
7984d532 3777{
d86d4aaf 3778 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3779
3780 /* Ignore EXCEPT. */
3781 if (lwp == except)
df3e4dbe 3782 return;
7984d532 3783
863d01bd 3784 lwp_suspended_inc (lwp);
7984d532 3785
df3e4dbe 3786 send_sigstop (thread, except);
02fc4de7
PA
3787}
3788
e8a625d1
PA
3789/* Mark LWP dead, with WSTAT as exit status pending to report later.
3790 If THREAD_EVENT is true, interpret WSTAT as a thread exit event
3791 instead of a process exit event. This is meaningful for the leader
3792 thread, as we normally report a process-wide exit event when we see
3793 the leader exit, and a thread exit event when we see any other
3794 thread exit. */
3795
95954743 3796static void
e8a625d1 3797mark_lwp_dead (struct lwp_info *lwp, int wstat, bool thread_event)
95954743 3798{
95954743
PA
3799 /* Store the exit status for later. */
3800 lwp->status_pending_p = 1;
3801 lwp->status_pending = wstat;
3802
00db26fa
PA
3803 /* Store in waitstatus as well, as there's nothing else to process
3804 for this event. */
3805 if (WIFEXITED (wstat))
e8a625d1
PA
3806 {
3807 if (thread_event)
3808 lwp->waitstatus.set_thread_exited (WEXITSTATUS (wstat));
3809 else
3810 lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
3811 }
00db26fa 3812 else if (WIFSIGNALED (wstat))
e8a625d1
PA
3813 {
3814 gdb_assert (!thread_event);
3815 lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
3816 }
3817 else
3818 gdb_assert_not_reached ("unknown status kind");
00db26fa 3819
95954743
PA
3820 /* Prevent trying to stop it. */
3821 lwp->stopped = 1;
3822
3823 /* No further stops are expected from a dead lwp. */
3824 lwp->stop_expected = 0;
3825}
3826
00db26fa
PA
3827/* Return true if LWP has exited already, and has a pending exit event
3828 to report to GDB. */
3829
3830static int
3831lwp_is_marked_dead (struct lwp_info *lwp)
3832{
3833 return (lwp->status_pending_p
3834 && (WIFEXITED (lwp->status_pending)
3835 || WIFSIGNALED (lwp->status_pending)));
3836}
3837
/* Pull in all pending events, leaving them pending on each LWP, until
   no unwaited-for children are left.  Used by stop_all_lwps after
   SIGSTOPs have been queued.  If the previously current thread turns
   out to have died meanwhile, deliberately leave no thread selected.  */

void
linux_process_target::wait_for_sigstop ()
{
  struct thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  /* Capture the current thread's id before waiting: events pulled in
     below may delete the thread object itself.  */
  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  scoped_restore_current_thread restore_thread;

  threads_debug_printf ("pulling events");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
  gdb_assert (ret == -1);

  if (saved_thread == NULL || mythread_alive (saved_tid))
    return;
  else
    {
      threads_debug_printf ("Previously current thread died.");

      /* We can't change the current inferior behind GDB's back,
	 otherwise, a subsequent command may apply to the wrong
	 process.  */
      restore_thread.dont_restore ();
      switch_to_thread (nullptr);
    }
}
3875
13e567af
TBA
3876bool
3877linux_process_target::stuck_in_jump_pad (thread_info *thread)
fa593d66 3878{
d86d4aaf 3879 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3880
863d01bd
PA
3881 if (lwp->suspended != 0)
3882 {
f34652de 3883 internal_error ("LWP %ld is suspended, suspended=%d\n",
863d01bd
PA
3884 lwpid_of (thread), lwp->suspended);
3885 }
fa593d66
PA
3886 gdb_assert (lwp->stopped);
3887
3888 /* Allow debugging the jump pad, gdb_collect, etc.. */
3889 return (supports_fast_tracepoints ()
58b4daa5 3890 && agent_loaded_p ()
fa593d66 3891 && (gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3892 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
fa593d66 3893 || thread->last_resume_kind == resume_step)
229d26fc
SM
3894 && (linux_fast_tracepoint_collecting (lwp, NULL)
3895 != fast_tpoint_collect_result::not_collecting));
fa593d66
PA
3896}
3897
d16f3f6c
TBA
3898void
3899linux_process_target::move_out_of_jump_pad (thread_info *thread)
fa593d66 3900{
d86d4aaf 3901 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3902 int *wstat;
3903
863d01bd
PA
3904 if (lwp->suspended != 0)
3905 {
f34652de 3906 internal_error ("LWP %ld is suspended, suspended=%d\n",
863d01bd
PA
3907 lwpid_of (thread), lwp->suspended);
3908 }
fa593d66
PA
3909 gdb_assert (lwp->stopped);
3910
f0ce0d3a 3911 /* For gdb_breakpoint_here. */
24583e45
TBA
3912 scoped_restore_current_thread restore_thread;
3913 switch_to_thread (thread);
f0ce0d3a 3914
fa593d66
PA
3915 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3916
3917 /* Allow debugging the jump pad, gdb_collect, etc. */
3918 if (!gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3919 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
3920 && thread->last_resume_kind != resume_step
3921 && maybe_move_out_of_jump_pad (lwp, wstat))
3922 {
c058728c
SM
3923 threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
3924 lwpid_of (thread));
fa593d66
PA
3925
3926 if (wstat)
3927 {
3928 lwp->status_pending_p = 0;
3929 enqueue_one_deferred_signal (lwp, wstat);
3930
c058728c
SM
3931 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad",
3932 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
3933 }
3934
df95181f 3935 resume_one_lwp (lwp, 0, 0, NULL);
fa593d66
PA
3936 }
3937 else
863d01bd 3938 lwp_suspended_inc (lwp);
fa593d66
PA
3939}
3940
5a6b0a41
SM
3941static bool
3942lwp_running (thread_info *thread)
fa593d66 3943{
d86d4aaf 3944 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3945
00db26fa 3946 if (lwp_is_marked_dead (lwp))
5a6b0a41
SM
3947 return false;
3948
3949 return !lwp->stopped;
fa593d66
PA
3950}
3951
d16f3f6c
TBA
3952void
3953linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
0d62e5e8 3954{
bde24c0a
PA
3955 /* Should not be called recursively. */
3956 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3957
c058728c
SM
3958 THREADS_SCOPED_DEBUG_ENTER_EXIT;
3959
3960 threads_debug_printf
3961 ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
3962 (except != NULL
3963 ? target_pid_to_str (ptid_of (get_lwp_thread (except))).c_str ()
3964 : "none"));
87ce2a04 3965
bde24c0a
PA
3966 stopping_threads = (suspend
3967 ? STOPPING_AND_SUSPENDING_THREADS
3968 : STOPPING_THREADS);
7984d532
PA
3969
3970 if (suspend)
df3e4dbe
SM
3971 for_each_thread ([&] (thread_info *thread)
3972 {
3973 suspend_and_send_sigstop (thread, except);
3974 });
7984d532 3975 else
df3e4dbe
SM
3976 for_each_thread ([&] (thread_info *thread)
3977 {
3978 send_sigstop (thread, except);
3979 });
3980
fa96cb38 3981 wait_for_sigstop ();
bde24c0a 3982 stopping_threads = NOT_STOPPING_THREADS;
87ce2a04 3983
c058728c 3984 threads_debug_printf ("setting stopping_threads back to !stopping");
0d62e5e8
DJ
3985}
3986
863d01bd
PA
3987/* Enqueue one signal in the chain of signals which need to be
3988 delivered to this process on next resume. */
3989
3990static void
3991enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3992{
013e3554
TBA
3993 lwp->pending_signals.emplace_back (signal);
3994 if (info == nullptr)
3995 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
863d01bd 3996 else
013e3554 3997 lwp->pending_signals.back ().info = *info;
863d01bd
PA
3998}
3999
df95181f
TBA
4000void
4001linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
fa5308bd 4002{
984a2c04
YQ
4003 struct thread_info *thread = get_lwp_thread (lwp);
4004 struct regcache *regcache = get_thread_regcache (thread, 1);
8ce47547 4005
24583e45 4006 scoped_restore_current_thread restore_thread;
984a2c04 4007
24583e45 4008 switch_to_thread (thread);
7582c77c 4009 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
fa5308bd 4010
a0ff9e1a 4011 for (CORE_ADDR pc : next_pcs)
3b9a79ef 4012 set_single_step_breakpoint (pc, current_ptid);
fa5308bd
AT
4013}
4014
df95181f
TBA
4015int
4016linux_process_target::single_step (lwp_info* lwp)
7fe5e27e
AT
4017{
4018 int step = 0;
4019
b31cdfa6 4020 if (supports_hardware_single_step ())
7fe5e27e
AT
4021 {
4022 step = 1;
4023 }
7582c77c 4024 else if (supports_software_single_step ())
7fe5e27e
AT
4025 {
4026 install_software_single_step_breakpoints (lwp);
4027 step = 0;
4028 }
4029 else
c058728c 4030 threads_debug_printf ("stepping is not implemented on this target");
7fe5e27e
AT
4031
4032 return step;
4033}
4034
35ac8b3e 4035/* The signal can be delivered to the inferior if we are not trying to
5b061e98
YQ
4036 finish a fast tracepoint collect. Since signal can be delivered in
4037 the step-over, the program may go to signal handler and trap again
4038 after return from the signal handler. We can live with the spurious
4039 double traps. */
35ac8b3e
YQ
4040
4041static int
4042lwp_signal_can_be_delivered (struct lwp_info *lwp)
4043{
229d26fc
SM
4044 return (lwp->collecting_fast_tracepoint
4045 == fast_tpoint_collect_result::not_collecting);
35ac8b3e
YQ
4046}
4047
/* Resume LWP, single-stepping if STEP, delivering SIGNAL (with INFO)
   when possible.  Throws on ptrace errors; resume_one_lwp is the
   non-throwing wrapper.  */

void
linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
					    int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int ptrace_request;
  struct process_info *proc = get_thread_process (thread);

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || !lwp->pending_signals.empty ()
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Not resuming lwp %ld (%s, stop %s); has pending status",
	 lwpid_of (thread), step ? "step" : "continue",
	 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      threads_debug_printf ("  pending reinsert at 0x%s",
			    paddress (lwp->bp_reinsert));

      if (supports_hardware_single_step ())
	{
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    threads_debug_printf
      ("lwp %ld wants to get out of fast tracepoint jump pad "
       "(exit-jump-pad-bkpt)", lwpid_of (thread));

  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      threads_debug_printf
	("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
	 lwpid_of (thread));

      if (supports_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error ("moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      threads_debug_printf
	("lwp %ld has a while-stepping action -> forcing step.",
	 lwpid_of (thread));

      step = single_step (lwp);
    }

  if (proc->tdesc != NULL && low_supports_breakpoints ())
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      lwp->stop_pc = low_get_pc (regcache);

      threads_debug_printf ("  %s from pc 0x%lx", step ? "step" : "continue",
			    (long) lwp->stop_pc);
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
    {
      const pending_signal &p_sig = lwp->pending_signals.front ();

      signal = p_sig.signal;
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals.pop_front ();
    }

  threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
			lwpid_of (thread), step ? "step" : "continue", signal,
			lwp->stop_expected ? "expected" : "not expected");

  low_prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  /* Pick the ptrace resume request: single-step, syscall-stop, or
     plain continue.  */
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  if (errno)
    {
      int saved_errno = errno;

      threads_debug_printf ("ptrace errno = %d (%s)",
			    saved_errno, strerror (saved_errno));

      errno = saved_errno;
      perror_with_name ("resuming thread");
    }

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
4243
d7599cc0
TBA
4244void
4245linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4246{
4247 /* Nop. */
4248}
4249
23f238d3
PA
4250/* Called when we try to resume a stopped LWP and that errors out. If
4251 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4252 or about to become), discard the error, clear any pending status
4253 the LWP may have, and return true (we'll collect the exit status
4254 soon enough). Otherwise, return false. */
4255
4256static int
4257check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4258{
4259 struct thread_info *thread = get_lwp_thread (lp);
4260
4261 /* If we get an error after resuming the LWP successfully, we'd
4262 confuse !T state for the LWP being gone. */
4263 gdb_assert (lp->stopped);
4264
4265 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4266 because even if ptrace failed with ESRCH, the tracee may be "not
4267 yet fully dead", but already refusing ptrace requests. In that
4268 case the tracee has 'R (Running)' state for a little bit
4269 (observed in Linux 3.18). See also the note on ESRCH in the
4270 ptrace(2) man page. Instead, check whether the LWP has any state
4271 other than ptrace-stopped. */
4272
4273 /* Don't assume anything if /proc/PID/status can't be read. */
4274 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 4275 {
23f238d3
PA
4276 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4277 lp->status_pending_p = 0;
4278 return 1;
4279 }
4280 return 0;
4281}
4282
/* Non-throwing wrapper around resume_one_lwp_throw: if resuming fails
   because the LWP is no longer ptrace-stopped (e.g. already a zombie),
   swallow the error and mark the LWP as running so its exit event can
   be collected.  */

void
linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
				      siginfo_t *info)
{
  try
    {
      resume_one_lwp_throw (lwp, step, signal, info);
    }
  catch (const gdb_exception_error &ex)
    {
      if (check_ptrace_stopped_lwp_gone (lwp))
	{
	  /* This could be because we tried to resume an LWP after its
	     leader exited.  Mark it as resumed, so we can collect an
	     exit event from it.  */
	  lwp->stopped = 0;
	  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
      else
	throw;
    }
}
4305
5fdda392
SM
4306/* This function is called once per thread via for_each_thread.
4307 We look up which resume request applies to THREAD and mark it with a
4308 pointer to the appropriate resume request.
5544ad89
DJ
4309
4310 This algorithm is O(threads * resume elements), but resume elements
4311 is small (and will remain small at least until GDB supports thread
4312 suspension). */
ebcf782c 4313
5fdda392
SM
4314static void
4315linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
0d62e5e8 4316{
d86d4aaf 4317 struct lwp_info *lwp = get_thread_lwp (thread);
64386c31 4318
5fdda392 4319 for (int ndx = 0; ndx < n; ndx++)
95954743 4320 {
5fdda392 4321 ptid_t ptid = resume[ndx].thread;
d7e15655 4322 if (ptid == minus_one_ptid
9c80ecd6 4323 || ptid == thread->id
0c9070b3
YQ
4324 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4325 of PID'. */
e99b03dc 4326 || (ptid.pid () == pid_of (thread)
0e998d96 4327 && (ptid.is_pid ()
e38504b3 4328 || ptid.lwp () == -1)))
95954743 4329 {
5fdda392 4330 if (resume[ndx].kind == resume_stop
8336d594 4331 && thread->last_resume_kind == resume_stop)
d50171e4 4332 {
c058728c
SM
4333 threads_debug_printf
4334 ("already %s LWP %ld at GDB's request",
4335 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
4336 ? "stopped" : "stopping"),
4337 lwpid_of (thread));
d50171e4
PA
4338
4339 continue;
4340 }
4341
5a04c4cf
PA
4342 /* Ignore (wildcard) resume requests for already-resumed
4343 threads. */
5fdda392 4344 if (resume[ndx].kind != resume_stop
5a04c4cf
PA
4345 && thread->last_resume_kind != resume_stop)
4346 {
c058728c
SM
4347 threads_debug_printf
4348 ("already %s LWP %ld at GDB's request",
4349 (thread->last_resume_kind == resume_step
4350 ? "stepping" : "continuing"),
4351 lwpid_of (thread));
5a04c4cf
PA
4352 continue;
4353 }
4354
393a6b59
PA
4355 /* Don't let wildcard resumes resume fork/vfork/clone
4356 children that GDB does not yet know are new children. */
4357 if (lwp->relative != NULL)
5a04c4cf 4358 {
393a6b59 4359 struct lwp_info *rel = lwp->relative;
5a04c4cf
PA
4360
4361 if (rel->status_pending_p
393a6b59 4362 && is_new_child_status (rel->waitstatus.kind ()))
5a04c4cf 4363 {
c058728c
SM
4364 threads_debug_printf
4365 ("not resuming LWP %ld: has queued stop reply",
4366 lwpid_of (thread));
5a04c4cf
PA
4367 continue;
4368 }
4369 }
4370
4371 /* If the thread has a pending event that has already been
4372 reported to GDBserver core, but GDB has not pulled the
4373 event out of the vStopped queue yet, likewise, ignore the
4374 (wildcard) resume request. */
9c80ecd6 4375 if (in_queued_stop_replies (thread->id))
5a04c4cf 4376 {
c058728c
SM
4377 threads_debug_printf
4378 ("not resuming LWP %ld: has queued stop reply",
4379 lwpid_of (thread));
5a04c4cf
PA
4380 continue;
4381 }
4382
5fdda392 4383 lwp->resume = &resume[ndx];
8336d594 4384 thread->last_resume_kind = lwp->resume->kind;
fa593d66 4385
c2d6af84
PA
4386 lwp->step_range_start = lwp->resume->step_range_start;
4387 lwp->step_range_end = lwp->resume->step_range_end;
4388
fa593d66
PA
4389 /* If we had a deferred signal to report, dequeue one now.
4390 This can happen if LWP gets more than one signal while
4391 trying to get out of a jump pad. */
4392 if (lwp->stopped
4393 && !lwp->status_pending_p
4394 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4395 {
4396 lwp->status_pending_p = 1;
4397
c058728c
SM
4398 threads_debug_printf
4399 ("Dequeueing deferred signal %d for LWP %ld, "
4400 "leaving status pending.",
4401 WSTOPSIG (lwp->status_pending),
4402 lwpid_of (thread));
fa593d66
PA
4403 }
4404
5fdda392 4405 return;
95954743
PA
4406 }
4407 }
2bd7c093
PA
4408
4409 /* No resume action for this thread. */
4410 lwp->resume = NULL;
5544ad89
DJ
4411}
4412
df95181f
TBA
4413bool
4414linux_process_target::resume_status_pending (thread_info *thread)
5544ad89 4415{
d86d4aaf 4416 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4417
bd99dc85
PA
4418 /* LWPs which will not be resumed are not interesting, because
4419 we might not wait for them next time through linux_wait. */
2bd7c093 4420 if (lwp->resume == NULL)
25c28b4d 4421 return false;
64386c31 4422
df95181f 4423 return thread_still_has_status_pending (thread);
d50171e4
PA
4424}
4425
/* Return true if THREAD is stopped at a breakpoint that needs
   stepping over before it can be resumed; each guard below explains
   why a given state makes a step-over unnecessary or wrong.  */

bool
linux_process_target::thread_needs_step_over (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
			    lwpid_of (thread));
      return false;
    }

  if (thread->last_resume_kind == resume_stop)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, should remain stopped",
	 lwpid_of (thread));
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
			    lwpid_of (thread));
      return false;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending status.",
	 lwpid_of (thread));
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Cancelling, PC was changed. "
	 "Old stop_pc was 0x%s, PC is now 0x%s", lwpid_of (thread),
	 paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (supports_software_single_step ()
      && !lwp->pending_signals.empty ()
      && lwp_signal_can_be_delivered (lwp))
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending signals.",
	 lwpid_of (thread));

      return false;
    }

  /* Breakpoint lookups below operate on the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
				" GDB breakpoint at 0x%s; skipping step over",
				lwpid_of (thread), paddress (pc));

	  return false;
	}
      else
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, "
				"found breakpoint at 0x%s",
				lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  return true;
	}
    }

  threads_debug_printf
    ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
     lwpid_of (thread), paddress (pc));

  return false;
}
4541
d16f3f6c
TBA
/* Begin a step-over of the breakpoint LWP is stopped at: stop every
   other LWP, remove the breakpoint from the inferior's code, and
   single-step LWP past it.  The breakpoint is re-inserted later by
   finish_step_over.  */

void
linux_process_target::start_step_over (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  CORE_ADDR pc;

  threads_debug_printf ("Starting step-over on LWP %ld.  Stopping all threads",
			lwpid_of (thread));

  stop_all_lwps (1, lwp);

  /* stop_all_lwps was told to suspend everything but LWP, so LWP
     itself must not be left suspended here.  */
  if (lwp->suspended != 0)
    {
      internal_error ("LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  threads_debug_printf ("Done stopping all threads for step-over.");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  bool step = false;
  {
    /* Breakpoint manipulation and single-step setup operate on the
       current thread, so temporarily switch to LWP's thread.  */
    scoped_restore_current_thread restore_thread;
    switch_to_thread (thread);

    /* Remember where to re-insert the breakpoint once the step is
       done (see finish_step_over).  */
    lwp->bp_reinsert = pc;
    uninsert_breakpoints_at (pc);
    uninsert_fast_tracepoint_jumps_at (pc);

    step = single_step (lwp);
  }

  resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->id;
}
4584
b31cdfa6
TBA
/* If LWP was in the middle of a step-over (bp_reinsert set), re-insert
   the stepped-over breakpoint and clear the step-over state.  Returns
   true if a step-over was indeed finished, false otherwise.  */

bool
linux_process_target::finish_step_over (lwp_info *lwp)
{
  if (lwp->bp_reinsert != 0)
    {
      scoped_restore_current_thread restore_thread;

      threads_debug_printf ("Finished step over.");

      switch_to_thread (get_lwp_thread (lwp));

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
	 may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any single-step breakpoints.  No longer needed.  We
	 don't have to worry about other threads hitting this trap,
	 and later not being able to explain it, because we were
	 stepping over a breakpoint, and we hold all threads but
	 LWP stopped while doing that.  */
      if (!supports_hardware_single_step ())
	{
	  gdb_assert (has_single_step_breakpoints (current_thread));
	  delete_single_step_breakpoints (current_thread);
	}

      step_over_bkpt = null_ptid;
      return true;
    }
  else
    return false;
}
4620
d16f3f6c
TBA
/* If a step-over is in progress (step_over_bkpt set), drain events
   until the stepping LWP reports back, finish the step-over, and
   discard the resulting SIGTRAP if GDB didn't ask for a step.  Used
   on detach so the inferior isn't left with a breakpoint removed.  */

void
linux_process_target::complete_ongoing_step_over ()
{
  if (step_over_bkpt != null_ptid)
    {
      struct lwp_info *lwp;
      int wstat;
      int ret;

      threads_debug_printf ("detach: step over in progress, finish it first");

      /* Passing NULL_PTID as filter indicates we want all events to
	 be left pending.  Eventually this returns when there are no
	 unwaited-for children left.  */
      ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
				     __WALL);
      gdb_assert (ret == -1);

      lwp = find_lwp_pid (step_over_bkpt);
      if (lwp != NULL)
	{
	  finish_step_over (lwp);

	  /* If we got our step SIGTRAP, don't leave it pending,
	     otherwise we would report it to GDB as a spurious
	     SIGTRAP.  */
	  gdb_assert (lwp->status_pending_p);
	  if (WIFSTOPPED (lwp->status_pending)
	      && WSTOPSIG (lwp->status_pending) == SIGTRAP)
	    {
	      thread_info *thread = get_lwp_thread (lwp);
	      if (thread->last_resume_kind != resume_step)
		{
		  threads_debug_printf ("detach: discard step-over SIGTRAP");

		  lwp->status_pending_p = 0;
		  lwp->status_pending = 0;
		  resume_one_lwp (lwp, lwp->stepping, 0, NULL);
		}
	      else
		threads_debug_printf
		  ("detach: resume_step, not discarding step-over SIGTRAP");
	    }
	}
      step_over_bkpt = null_ptid;
      unsuspend_all_lwps (lwp);
    }
}
4669
df95181f
TBA
/* Act on THREAD's previously-recorded resume request (lwp->resume):
   handle resume_stop requests by queuing a SIGSTOP, enqueue any signal
   the client asked to deliver, and either actually resume the LWP or
   leave it stopped when it has a pending status, is suspended, or
   LEAVE_ALL_STOPPED is set (all-stop step-over in progress).  */

void
linux_process_target::resume_one_thread (thread_info *thread,
					 bool leave_all_stopped)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int leave_pending;

  /* No resume request recorded for this thread.  */
  if (lwp->resume == NULL)
    return;

  if (lwp->resume->kind == resume_stop)
    {
      threads_debug_printf ("resume_stop request for LWP %ld",
			    lwpid_of (thread));

      if (!lwp->stopped)
	{
	  threads_debug_printf ("stopping LWP %ld", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  threads_debug_printf ("already stopped LWP %ld", lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report.empty ())
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.set_ignore ();
      return;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume it - we can just report the pending status.
     Likewise if it is suspended, because e.g., another thread is
     stepping past a breakpoint.  Make sure to queue any signals that
     would otherwise be sent.  In all-stop mode, we do this decision
     based on if *any* thread has a pending status.  If there's a
     thread that needs the step-over-breakpoint dance, then don't
     resume any other thread but that particular one.  */
  leave_pending = (lwp->suspended
		   || lwp->status_pending_p
		   || leave_all_stopped);

  /* If we have a new signal, enqueue the signal.  */
  if (lwp->resume->sig != 0)
    {
      siginfo_t info, *info_p;

      /* If this is the same signal we were previously stopped by,
	 make sure to queue its siginfo.  */
      if (WIFSTOPPED (lwp->last_status)
	  && WSTOPSIG (lwp->last_status) == lwp->resume->sig
	  && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
		     (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
    }

  if (!leave_pending)
    {
      threads_debug_printf ("resuming LWP %ld", lwpid_of (thread));

      proceed_one_lwp (thread, NULL);
    }
  else
    threads_debug_printf ("leaving LWP %ld stopped", lwpid_of (thread));

  /* The request has been consumed.  */
  thread->last_status.set_ignore ();
  lwp->resume = NULL;
}
4767
0e4d7e35
TBA
/* Implementation of the target_ops resume method (vCont handling):
   record the resume requests on each thread, then either resume them
   all, or — if some thread has a pending status to report, or needs a
   breakpoint stepped over — keep everything stopped and handle that
   first.  Signals to deliver are queued in either case.  */

void
linux_process_target::resume (thread_resume *resume_info, size_t n)
{
  struct thread_info *need_step_over = NULL;

  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  /* Match each thread against RESUME_INFO and stash the winning
     request in its lwp->resume.  */
  for_each_thread ([&] (thread_info *thread)
    {
      linux_set_resume_request (thread, resume_info, n);
    });

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  bool any_pending = false;
  if (!non_stop)
    any_pending = find_thread ([this] (thread_info *thread)
		    {
		      return resume_status_pending (thread);
		    }) != nullptr;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && low_supports_breakpoints ())
    need_step_over = find_thread ([this] (thread_info *thread)
		       {
			 return thread_needs_step_over (thread);
		       });

  bool leave_all_stopped = (need_step_over != NULL || any_pending);

  if (need_step_over != NULL)
    threads_debug_printf ("Not resuming all, need step over");
  else if (any_pending)
    threads_debug_printf ("Not resuming, all-stop and found "
			  "an LWP with pending status");
  else
    threads_debug_printf ("Resuming, no pending status or step over needed");

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  for_each_thread ([&] (thread_info *thread)
    {
      resume_one_thread (thread, leave_all_stopped);
    });

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  /* We may have events that were pending that can/should be sent to
     the client now.  Trigger a linux_wait call.  */
  if (target_is_async_p ())
    async_file_mark ();
}
4830
df95181f
TBA
/* Resume one LWP (skipping EXCEPT), unless it must stay stopped:
   already running, client asked it to stay stopped, it has a pending
   status to report, or it is suspended by a step-over.  Chooses
   whether to single-step based on the client's request and any
   in-progress breakpoint re-insertion.  */

void
linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  /* EXCEPT is the LWP the caller is handling specially (e.g. the one
     doing a step-over); leave it alone.  */
  if (lwp == except)
    return;

  threads_debug_printf ("lwp %ld", lwpid_of (thread));

  if (!lwp->stopped)
    {
      threads_debug_printf ("   LWP %ld already running", lwpid_of (thread));
      return;
    }

  /* Client asked this thread to stop and we already reported the
     stop; keep it stopped.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
    {
      threads_debug_printf ("   client wants LWP to remain %ld stopped",
			    lwpid_of (thread));
      return;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf ("   LWP %ld has pending status, leaving stopped",
			    lwpid_of (thread));
      return;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      threads_debug_printf ("   LWP %ld is suspended", lwpid_of (thread));
      return;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report.empty ()
      && (lwp->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting))
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      threads_debug_printf
	("Client wants LWP %ld to stop.  Making sure it has a SIGSTOP pending",
	 lwpid_of (thread));

      send_sigstop (lwp);
    }

  if (thread->last_resume_kind == resume_step)
    {
      threads_debug_printf ("   stepping LWP %ld, client wants it stepping",
			    lwpid_of (thread));

      /* If resume_step is requested by GDB, install single-step
	 breakpoints when the thread is about to be actually resumed if
	 the single-step breakpoints weren't removed.  */
      if (supports_software_single_step ()
	  && !has_single_step_breakpoints (thread))
	install_software_single_step_breakpoints (lwp);

      step = maybe_hw_step (thread);
    }
  else if (lwp->bp_reinsert != 0)
    {
      /* A step-over is in flight for this LWP; keep stepping until
	 the breakpoint can be re-inserted.  */
      threads_debug_printf ("   stepping LWP %ld, reinsert set",
			    lwpid_of (thread));

      step = maybe_hw_step (thread);
    }
  else
    step = 0;

  resume_one_lwp (lwp, step, 0, NULL);
}
4919
df95181f
TBA
/* Decrement THREAD's LWP suspend count (taken during a step-over) and
   then resume it via proceed_one_lwp, unless it is EXCEPT.  */

void
linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
						     lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  if (lwp == except)
    return;

  lwp_suspended_decr (lwp);

  proceed_one_lwp (thread, except);
}
4933
d16f3f6c
TBA
/* Resume every LWP — unless some thread is stopped at a breakpoint
   that needs stepping over, in which case start that step-over first
   and resume the rest afterwards.  */

void
linux_process_target::proceed_all_lwps ()
{
  struct thread_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (low_supports_breakpoints ())
    {
      need_step_over = find_thread ([this] (thread_info *thread)
			 {
			   return thread_needs_step_over (thread);
			 });

      if (need_step_over != NULL)
	{
	  threads_debug_printf ("found thread %ld needing a step-over",
				lwpid_of (need_step_over));

	  start_step_over (get_thread_lwp (need_step_over));
	  return;
	}
    }

  threads_debug_printf ("Proceeding, no step-over needed");

  for_each_thread ([this] (thread_info *thread)
    {
      proceed_one_lwp (thread, NULL);
    });
}
4968
d16f3f6c
TBA
/* Re-start every LWP except EXCEPT.  If UNSUSPEND is set, also drop
   the suspend count that stop_all_lwps took on each one.  */

void
linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
{
  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  if (except)
    threads_debug_printf ("except=(LWP %ld)",
			  lwpid_of (get_lwp_thread (except)));
  else
    threads_debug_printf ("except=nullptr");

  if (unsuspend)
    for_each_thread ([&] (thread_info *thread)
      {
	unsuspend_and_proceed_one_lwp (thread, except);
      });
  else
    for_each_thread ([&] (thread_info *thread)
      {
	proceed_one_lwp (thread, except);
      });
}
4991
58caa3dc
DJ
4992
4993#ifdef HAVE_LINUX_REGSETS
4994
1faeff08
MR
4995#define use_linux_regsets 1
4996
030031ee
PA
4997/* Returns true if REGSET has been disabled. */
4998
4999static int
5000regset_disabled (struct regsets_info *info, struct regset_info *regset)
5001{
5002 return (info->disabled_regsets != NULL
5003 && info->disabled_regsets[regset - info->regsets]);
5004}
5005
5006/* Disable REGSET. */
5007
5008static void
5009disable_regset (struct regsets_info *info, struct regset_info *regset)
5010{
5011 int dr_offset;
5012
5013 dr_offset = regset - info->regsets;
5014 if (info->disabled_regsets == NULL)
224c3ddb 5015 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
030031ee
PA
5016 info->disabled_regsets[dr_offset] = 1;
5017}
5018
58caa3dc 5019static int
3aee8918
PA
5020regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5021 struct regcache *regcache)
58caa3dc
DJ
5022{
5023 struct regset_info *regset;
e9d25b98 5024 int saw_general_regs = 0;
95954743 5025 int pid;
1570b33e 5026 struct iovec iov;
58caa3dc 5027
0bfdf32f 5028 pid = lwpid_of (current_thread);
28eef672 5029 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 5030 {
1570b33e
L
5031 void *buf, *data;
5032 int nt_type, res;
58caa3dc 5033
030031ee 5034 if (regset->size == 0 || regset_disabled (regsets_info, regset))
28eef672 5035 continue;
58caa3dc 5036
bca929d3 5037 buf = xmalloc (regset->size);
1570b33e
L
5038
5039 nt_type = regset->nt_type;
5040 if (nt_type)
5041 {
5042 iov.iov_base = buf;
5043 iov.iov_len = regset->size;
5044 data = (void *) &iov;
5045 }
5046 else
5047 data = buf;
5048
dfb64f85 5049#ifndef __sparc__
f15f9948 5050 res = ptrace (regset->get_request, pid,
b8e1b30e 5051 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5052#else
1570b33e 5053 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 5054#endif
58caa3dc
DJ
5055 if (res < 0)
5056 {
1ef53e6b
AH
5057 if (errno == EIO
5058 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
58caa3dc 5059 {
1ef53e6b
AH
5060 /* If we get EIO on a regset, or an EINVAL and the regset is
5061 optional, do not try it again for this process mode. */
030031ee 5062 disable_regset (regsets_info, regset);
58caa3dc 5063 }
e5a9158d
AA
5064 else if (errno == ENODATA)
5065 {
5066 /* ENODATA may be returned if the regset is currently
5067 not "active". This can happen in normal operation,
5068 so suppress the warning in this case. */
5069 }
fcd4a73d
YQ
5070 else if (errno == ESRCH)
5071 {
5072 /* At this point, ESRCH should mean the process is
5073 already gone, in which case we simply ignore attempts
5074 to read its registers. */
5075 }
58caa3dc
DJ
5076 else
5077 {
0d62e5e8 5078 char s[256];
95954743
PA
5079 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5080 pid);
0d62e5e8 5081 perror (s);
58caa3dc
DJ
5082 }
5083 }
098dbe61
AA
5084 else
5085 {
5086 if (regset->type == GENERAL_REGS)
5087 saw_general_regs = 1;
5088 regset->store_function (regcache, buf);
5089 }
fdeb2a12 5090 free (buf);
58caa3dc 5091 }
e9d25b98
DJ
5092 if (saw_general_regs)
5093 return 0;
5094 else
5095 return 1;
58caa3dc
DJ
5096}
5097
/* Write all writable regsets described by REGSETS_INFO from REGCACHE
   back to the current LWP.  Each regset is first read from the kernel,
   overlaid with the cached values, then written back, so kernel-only
   fields are preserved.  Returns 0 if a general-registers regset was
   written, 1 otherwise (caller falls back to PTRACE_POKEUSER).  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip empty, disabled, or read-only (no fill_function)
	 regsets.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5186
1faeff08 5187#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5188
1faeff08 5189#define use_linux_regsets 0
3aee8918
PA
5190#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5191#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5192
58caa3dc 5193#endif
1faeff08
MR
5194
5195/* Return 1 if register REGNO is supported by one of the regset ptrace
5196 calls or 0 if it has to be transferred individually. */
5197
5198static int
3aee8918 5199linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5200{
5201 unsigned char mask = 1 << (regno % 8);
5202 size_t index = regno / 8;
5203
5204 return (use_linux_regsets
3aee8918
PA
5205 && (regs_info->regset_bitmap == NULL
5206 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5207}
5208
58caa3dc 5209#ifdef HAVE_LINUX_USRREGS
1faeff08 5210
5b3da067 5211static int
3aee8918 5212register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5213{
5214 int addr;
5215
3aee8918 5216 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5217 error ("Invalid register number %d.", regnum);
5218
3aee8918 5219 addr = usrregs->regmap[regnum];
1faeff08
MR
5220
5221 return addr;
5222}
5223
daca57a7
TBA
5224
/* Fetch one register (REGNO) from the inferior's USER area with
   PTRACE_PEEKUSER, one word at a time, and supply it to REGCACHE.
   Silently does nothing for registers out of range, unfetchable on
   this arch, or without a USER-area address.  On a ptrace error the
   register is supplied as unavailable.  */

void
linux_process_target::fetch_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_fetch_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace transfer
     words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      /* PTRACE_PEEKUSER's return value can be a valid -1, so errors
	 are detected via errno.  */
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	{
	  /* Mark register REGNO unavailable.  */
	  supply_register (regcache, regno, NULL);
	  return;
	}
    }

  low_supply_ptrace_register (regcache, regno, buf);
}
5268
daca57a7
TBA
/* Write one register (REGNO) from REGCACHE into the inferior's USER
   area with PTRACE_POKEUSER, one word at a time.  Silently does
   nothing for registers out of range, unstorable on this arch, or
   without a USER-area address.  Errors other than ESRCH (process
   gone) are reported via error ().  */

void
linux_process_target::store_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_store_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace transfer
     words; the padding is zero-filled below.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);
  memset (buf, 0, size);

  low_collect_ptrace_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	    /* Coerce to a uintptr_t first to avoid potential gcc warning
	       about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;


	  if (!low_cannot_store_register (regno))
	    error ("writing register %d: %s", regno, safe_strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
daca57a7 5320#endif /* HAVE_LINUX_USRREGS */
1faeff08 5321
b35db733
TBA
/* Default implementation: collect REGNO from REGCACHE into BUF with no
   arch-specific massaging.  Archs whose ptrace layout differs from the
   regcache layout override this.  */

void
linux_process_target::low_collect_ptrace_register (regcache *regcache,
						   int regno, char *buf)
{
  collect_register (regcache, regno, buf);
}
5328
/* Default implementation: supply BUF to REGCACHE as the value of REGNO
   with no arch-specific massaging; counterpart of
   low_collect_ptrace_register.  */

void
linux_process_target::low_supply_ptrace_register (regcache *regcache,
						  int regno, const char *buf)
{
  supply_register (regcache, regno, buf);
}
5335
daca57a7
TBA
5336void
5337linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5338 regcache *regcache,
5339 int regno, int all)
1faeff08 5340{
daca57a7 5341#ifdef HAVE_LINUX_USRREGS
3aee8918
PA
5342 struct usrregs_info *usr = regs_info->usrregs;
5343
1faeff08
MR
5344 if (regno == -1)
5345 {
3aee8918
PA
5346 for (regno = 0; regno < usr->num_regs; regno++)
5347 if (all || !linux_register_in_regsets (regs_info, regno))
5348 fetch_register (usr, regcache, regno);
1faeff08
MR
5349 }
5350 else
3aee8918 5351 fetch_register (usr, regcache, regno);
daca57a7 5352#endif
1faeff08
MR
5353}
5354
daca57a7
TBA
5355void
5356linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5357 regcache *regcache,
5358 int regno, int all)
1faeff08 5359{
daca57a7 5360#ifdef HAVE_LINUX_USRREGS
3aee8918
PA
5361 struct usrregs_info *usr = regs_info->usrregs;
5362
1faeff08
MR
5363 if (regno == -1)
5364 {
3aee8918
PA
5365 for (regno = 0; regno < usr->num_regs; regno++)
5366 if (all || !linux_register_in_regsets (regs_info, regno))
5367 store_register (usr, regcache, regno);
1faeff08
MR
5368 }
5369 else
3aee8918 5370 store_register (usr, regcache, regno);
58caa3dc 5371#endif
daca57a7 5372}
1faeff08 5373
a5a4d4cd
TBA
/* Implementation of the target_ops fetch_registers method.  Fetch
   REGNO (or all registers when REGNO is -1) into REGCACHE, preferring
   regset (PTRACE_GETREGSET) transfers and falling back to per-register
   USER-area reads for whatever the regsets didn't cover.  */

void
linux_process_target::fetch_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      /* Give the arch-specific hook first crack at each register
	 (e.g. registers with special fetch paths).  */
      if (regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  low_fetch_register (regcache, regno);

      /* ALL is non-zero when the regsets didn't include the general
	 registers; then the USER-area path must fetch everything.  */
      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      if (low_fetch_register (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5404
a5a4d4cd
TBA
/* Implementation of the target_ops store_registers method.  Write
   REGNO (or all registers when REGNO is -1) from REGCACHE to the
   inferior, preferring regset (PTRACE_SETREGSET) transfers and falling
   back to per-register USER-area writes for what the regsets didn't
   cover.  */

void
linux_process_target::store_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      /* ALL is non-zero when the regsets didn't include the general
	 registers; then the USER-area path must write everything.  */
      all = regsets_store_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5429
bd70b1f2
TBA
/* Default implementation: no register has a special fetch path; archs
   override this to handle registers fetched outside the usual
   regset/USER-area mechanisms.  Returns false meaning "not handled".  */

bool
linux_process_target::low_fetch_register (regcache *regcache, int regno)
{
  return false;
}
da6d8c04 5435
e2558df3 5436/* A wrapper for the read_memory target op. */
da6d8c04 5437
/* A wrapper for the read_memory target op, callable from code that
   only has the global `the_target' at hand.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  return the_target->read_memory (memaddr, myaddr, len);
}
5443
e2558df3 5444
421490af
PA
/* Helper for read_memory/write_memory using /proc/PID/mem.  Because
   we can use a single read/write call, this can be much more
   efficient than banging away at PTRACE_PEEKTEXT.  Also, unlike
   PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running threads.
   One an only one of READBUF and WRITEBUF is non-null.  If READBUF is
   not null, then we're reading, otherwise we're writing.  */

static int
proc_xfer_memory (CORE_ADDR memaddr, unsigned char *readbuf,
		  const gdb_byte *writebuf, int len)
{
  gdb_assert ((readbuf == nullptr) != (writebuf == nullptr));

  process_info *proc = current_process ();

  /* The /proc/PID/mem descriptor is opened at attach time and cached
     in the process' private data.  */
  int fd = proc->priv->mem_fd;
  if (fd == -1)
    return EIO;

  /* Loop because read/write may transfer fewer than LEN bytes at a
     time.  */
  while (len > 0)
    {
      int bytes;

      /* Use pread64/pwrite64 if available, since they save a syscall
	 and can handle 64-bit offsets even on 32-bit platforms (for
	 instance, SPARC debugging a SPARC64 application).  But only
	 use them if the offset isn't so high that when cast to off_t
	 it'd be negative, as seen on SPARC64.  pread64/pwrite64
	 outright reject such offsets.  lseek does not.  */
#ifdef HAVE_PREAD64
      if ((off_t) memaddr >= 0)
	bytes = (readbuf != nullptr
		 ? pread64 (fd, readbuf, len, memaddr)
		 : pwrite64 (fd, writebuf, len, memaddr));
      else
#endif
	{
	  bytes = -1;
	  if (lseek (fd, memaddr, SEEK_SET) != -1)
	    bytes = (readbuf != nullptr
		     ? read (fd, readbuf, len)
		     : write (fd, writebuf, len));
	}

      if (bytes < 0)
	return errno;
      else if (bytes == 0)
	{
	  /* EOF means the address space is gone, the whole process
	     exited or execed.  */
	  return EIO;
	}

      /* Advance past the bytes just transferred and retry the
	 remainder.  */
      memaddr += bytes;
      if (readbuf != nullptr)
	readbuf += bytes;
      else
	writebuf += bytes;
      len -= bytes;
    }

  return 0;
}
c3e735a6 5508
421490af
PA
5509int
5510linux_process_target::read_memory (CORE_ADDR memaddr,
5511 unsigned char *myaddr, int len)
5512{
5513 return proc_xfer_memory (memaddr, myaddr, nullptr, len);
da6d8c04
DJ
5514}
5515
93ae6fdc
PA
5516/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5517 memory at MEMADDR. On failure (cannot write to the inferior)
f0ae6fc3 5518 returns the value of errno. Always succeeds if LEN is zero. */
da6d8c04 5519
e2558df3
TBA
5520int
5521linux_process_target::write_memory (CORE_ADDR memaddr,
5522 const unsigned char *myaddr, int len)
da6d8c04 5523{
0d62e5e8
DJ
5524 if (debug_threads)
5525 {
58d6951d 5526 /* Dump up to four bytes. */
bf47e248
PA
5527 char str[4 * 2 + 1];
5528 char *p = str;
5529 int dump = len < 4 ? len : 4;
5530
421490af 5531 for (int i = 0; i < dump; i++)
bf47e248
PA
5532 {
5533 sprintf (p, "%02x", myaddr[i]);
5534 p += 2;
5535 }
5536 *p = '\0';
5537
c058728c 5538 threads_debug_printf ("Writing %s to 0x%08lx in process %d",
421490af 5539 str, (long) memaddr, current_process ()->pid);
0d62e5e8
DJ
5540 }
5541
421490af 5542 return proc_xfer_memory (memaddr, nullptr, myaddr, len);
da6d8c04 5543}
2f2893d9 5544
2a31c7aa
TBA
5545void
5546linux_process_target::look_up_symbols ()
2f2893d9 5547{
0d62e5e8 5548#ifdef USE_THREAD_DB
95954743
PA
5549 struct process_info *proc = current_process ();
5550
fe978cb0 5551 if (proc->priv->thread_db != NULL)
0d62e5e8
DJ
5552 return;
5553
9b4c5f87 5554 thread_db_init ();
0d62e5e8
DJ
5555#endif
5556}
5557
eb497a2a
TBA
5558void
5559linux_process_target::request_interrupt ()
e5379b03 5560{
78708b7c
PA
5561 /* Send a SIGINT to the process group. This acts just like the user
5562 typed a ^C on the controlling terminal. */
4c35c4c6
TV
5563 int res = ::kill (-signal_pid, SIGINT);
5564 if (res == -1)
5565 warning (_("Sending SIGINT to process group of pid %ld failed: %s"),
5566 signal_pid, safe_strerror (errno));
e5379b03
DJ
5567}
5568
eac215cc
TBA
5569bool
5570linux_process_target::supports_read_auxv ()
5571{
5572 return true;
5573}
5574
aa691b87
RM
5575/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5576 to debugger memory starting at MYADDR. */
5577
eac215cc 5578int
43e5fbd8
TJB
5579linux_process_target::read_auxv (int pid, CORE_ADDR offset,
5580 unsigned char *myaddr, unsigned int len)
aa691b87
RM
5581{
5582 char filename[PATH_MAX];
5583 int fd, n;
5584
6cebaf6e 5585 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5586
5587 fd = open (filename, O_RDONLY);
5588 if (fd < 0)
5589 return -1;
5590
5591 if (offset != (CORE_ADDR) 0
5592 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5593 n = -1;
5594 else
5595 n = read (fd, myaddr, len);
5596
5597 close (fd);
5598
5599 return n;
5600}
5601
7e0bde70
TBA
5602int
5603linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5604 int size, raw_breakpoint *bp)
e013ee27 5605{
c8f4bfdd
YQ
5606 if (type == raw_bkpt_type_sw)
5607 return insert_memory_breakpoint (bp);
e013ee27 5608 else
9db9aa23
TBA
5609 return low_insert_point (type, addr, size, bp);
5610}
5611
5612int
5613linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5614 int size, raw_breakpoint *bp)
5615{
5616 /* Unsupported (see target.h). */
5617 return 1;
e013ee27
OF
5618}
5619
7e0bde70
TBA
5620int
5621linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5622 int size, raw_breakpoint *bp)
e013ee27 5623{
c8f4bfdd
YQ
5624 if (type == raw_bkpt_type_sw)
5625 return remove_memory_breakpoint (bp);
e013ee27 5626 else
9db9aa23
TBA
5627 return low_remove_point (type, addr, size, bp);
5628}
5629
5630int
5631linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5632 int size, raw_breakpoint *bp)
5633{
5634 /* Unsupported (see target.h). */
5635 return 1;
e013ee27
OF
5636}
5637
84320c4e 5638/* Implement the stopped_by_sw_breakpoint target_ops
3e572f71
PA
5639 method. */
5640
84320c4e
TBA
5641bool
5642linux_process_target::stopped_by_sw_breakpoint ()
3e572f71
PA
5643{
5644 struct lwp_info *lwp = get_thread_lwp (current_thread);
5645
5646 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5647}
5648
84320c4e 5649/* Implement the supports_stopped_by_sw_breakpoint target_ops
3e572f71
PA
5650 method. */
5651
84320c4e
TBA
5652bool
5653linux_process_target::supports_stopped_by_sw_breakpoint ()
3e572f71
PA
5654{
5655 return USE_SIGTRAP_SIGINFO;
5656}
5657
93fe88b2 5658/* Implement the stopped_by_hw_breakpoint target_ops
3e572f71
PA
5659 method. */
5660
93fe88b2
TBA
5661bool
5662linux_process_target::stopped_by_hw_breakpoint ()
3e572f71
PA
5663{
5664 struct lwp_info *lwp = get_thread_lwp (current_thread);
5665
5666 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5667}
5668
93fe88b2 5669/* Implement the supports_stopped_by_hw_breakpoint target_ops
3e572f71
PA
5670 method. */
5671
93fe88b2
TBA
5672bool
5673linux_process_target::supports_stopped_by_hw_breakpoint ()
3e572f71
PA
5674{
5675 return USE_SIGTRAP_SIGINFO;
5676}
5677
70b90b91 5678/* Implement the supports_hardware_single_step target_ops method. */
45614f15 5679
22aa6223
TBA
5680bool
5681linux_process_target::supports_hardware_single_step ()
45614f15 5682{
b31cdfa6 5683 return true;
45614f15
YQ
5684}
5685
6eeb5c55
TBA
5686bool
5687linux_process_target::stopped_by_watchpoint ()
e013ee27 5688{
0bfdf32f 5689 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c 5690
15c66dd6 5691 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
e013ee27
OF
5692}
5693
6eeb5c55
TBA
5694CORE_ADDR
5695linux_process_target::stopped_data_address ()
e013ee27 5696{
0bfdf32f 5697 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
5698
5699 return lwp->stopped_data_address;
e013ee27
OF
5700}
5701
db0dfaa0
LM
5702/* This is only used for targets that define PT_TEXT_ADDR,
5703 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5704 the target has different ways of acquiring this information, like
5705 loadmaps. */
52fb6437 5706
5203ae1e
TBA
5707bool
5708linux_process_target::supports_read_offsets ()
5709{
5710#ifdef SUPPORTS_READ_OFFSETS
5711 return true;
5712#else
5713 return false;
5714#endif
5715}
5716
52fb6437
NS
5717/* Under uClinux, programs are loaded at non-zero offsets, which we need
5718 to tell gdb about. */
5719
5203ae1e
TBA
5720int
5721linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
52fb6437 5722{
5203ae1e 5723#ifdef SUPPORTS_READ_OFFSETS
52fb6437 5724 unsigned long text, text_end, data;
62828379 5725 int pid = lwpid_of (current_thread);
52fb6437
NS
5726
5727 errno = 0;
5728
b8e1b30e
LM
5729 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5730 (PTRACE_TYPE_ARG4) 0);
5731 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5732 (PTRACE_TYPE_ARG4) 0);
5733 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5734 (PTRACE_TYPE_ARG4) 0);
52fb6437
NS
5735
5736 if (errno == 0)
5737 {
5738 /* Both text and data offsets produced at compile-time (and so
1b3f6016
PA
5739 used by gdb) are relative to the beginning of the program,
5740 with the data segment immediately following the text segment.
5741 However, the actual runtime layout in memory may put the data
5742 somewhere else, so when we send gdb a data base-address, we
5743 use the real data base address and subtract the compile-time
5744 data base-address from it (which is just the length of the
5745 text segment). BSS immediately follows data in both
5746 cases. */
52fb6437
NS
5747 *text_p = text;
5748 *data_p = data - (text_end - text);
1b3f6016 5749
52fb6437
NS
5750 return 1;
5751 }
5203ae1e
TBA
5752 return 0;
5753#else
5754 gdb_assert_not_reached ("target op read_offsets not supported");
52fb6437 5755#endif
5203ae1e 5756}
52fb6437 5757
6e3fd7e9
TBA
5758bool
5759linux_process_target::supports_get_tls_address ()
5760{
5761#ifdef USE_THREAD_DB
5762 return true;
5763#else
5764 return false;
5765#endif
5766}
5767
5768int
5769linux_process_target::get_tls_address (thread_info *thread,
5770 CORE_ADDR offset,
5771 CORE_ADDR load_module,
5772 CORE_ADDR *address)
5773{
5774#ifdef USE_THREAD_DB
5775 return thread_db_get_tls_address (thread, offset, load_module, address);
5776#else
5777 return -1;
5778#endif
5779}
5780
2d0795ee
TBA
5781bool
5782linux_process_target::supports_qxfer_osdata ()
5783{
5784 return true;
5785}
5786
5787int
5788linux_process_target::qxfer_osdata (const char *annex,
5789 unsigned char *readbuf,
5790 unsigned const char *writebuf,
5791 CORE_ADDR offset, int len)
07e059b5 5792{
d26e3629 5793 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
5794}
5795
cb63de7c
TBA
5796void
5797linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5798 gdb_byte *inf_siginfo, int direction)
d0722149 5799{
cb63de7c 5800 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
d0722149
DE
5801
5802 /* If there was no callback, or the callback didn't do anything,
5803 then just do a straight memcpy. */
5804 if (!done)
5805 {
5806 if (direction == 1)
a5362b9a 5807 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 5808 else
a5362b9a 5809 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
5810 }
5811}
5812
cb63de7c
TBA
5813bool
5814linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
5815 int direction)
5816{
5817 return false;
5818}
5819
d7abedf7
TBA
5820bool
5821linux_process_target::supports_qxfer_siginfo ()
5822{
5823 return true;
5824}
5825
5826int
5827linux_process_target::qxfer_siginfo (const char *annex,
5828 unsigned char *readbuf,
5829 unsigned const char *writebuf,
5830 CORE_ADDR offset, int len)
4aa995e1 5831{
d0722149 5832 int pid;
a5362b9a 5833 siginfo_t siginfo;
8adce034 5834 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1 5835
0bfdf32f 5836 if (current_thread == NULL)
4aa995e1
PA
5837 return -1;
5838
0bfdf32f 5839 pid = lwpid_of (current_thread);
4aa995e1 5840
c058728c
SM
5841 threads_debug_printf ("%s siginfo for lwp %d.",
5842 readbuf != NULL ? "Reading" : "Writing",
5843 pid);
4aa995e1 5844
0adea5f7 5845 if (offset >= sizeof (siginfo))
4aa995e1
PA
5846 return -1;
5847
b8e1b30e 5848 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
5849 return -1;
5850
d0722149
DE
5851 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5852 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5853 inferior with a 64-bit GDBSERVER should look the same as debugging it
5854 with a 32-bit GDBSERVER, we need to convert it. */
5855 siginfo_fixup (&siginfo, inf_siginfo, 0);
5856
4aa995e1
PA
5857 if (offset + len > sizeof (siginfo))
5858 len = sizeof (siginfo) - offset;
5859
5860 if (readbuf != NULL)
d0722149 5861 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
5862 else
5863 {
d0722149
DE
5864 memcpy (inf_siginfo + offset, writebuf, len);
5865
5866 /* Convert back to ptrace layout before flushing it out. */
5867 siginfo_fixup (&siginfo, inf_siginfo, 1);
5868
b8e1b30e 5869 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
5870 return -1;
5871 }
5872
5873 return len;
5874}
5875
bd99dc85
PA
5876/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5877 so we notice when children change state; as the handler for the
5878 sigsuspend in my_waitpid. */
5879
5880static void
5881sigchld_handler (int signo)
5882{
5883 int old_errno = errno;
5884
5885 if (debug_threads)
e581f2b4
PA
5886 {
5887 do
5888 {
a7e559cc
AH
5889 /* Use the async signal safe debug function. */
5890 if (debug_write ("sigchld_handler\n",
5891 sizeof ("sigchld_handler\n") - 1) < 0)
e581f2b4
PA
5892 break; /* just ignore */
5893 } while (0);
5894 }
bd99dc85
PA
5895
5896 if (target_is_async_p ())
5897 async_file_mark (); /* trigger a linux_wait */
5898
5899 errno = old_errno;
5900}
5901
0dc587d4
TBA
5902bool
5903linux_process_target::supports_non_stop ()
bd99dc85 5904{
0dc587d4 5905 return true;
bd99dc85
PA
5906}
5907
0dc587d4
TBA
5908bool
5909linux_process_target::async (bool enable)
bd99dc85 5910{
0dc587d4 5911 bool previous = target_is_async_p ();
bd99dc85 5912
c058728c
SM
5913 threads_debug_printf ("async (%d), previous=%d",
5914 enable, previous);
8336d594 5915
bd99dc85
PA
5916 if (previous != enable)
5917 {
5918 sigset_t mask;
5919 sigemptyset (&mask);
5920 sigaddset (&mask, SIGCHLD);
5921
21987b9c 5922 gdb_sigmask (SIG_BLOCK, &mask, NULL);
bd99dc85
PA
5923
5924 if (enable)
5925 {
8674f082 5926 if (!linux_event_pipe.open_pipe ())
aa96c426 5927 {
21987b9c 5928 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
aa96c426
GB
5929
5930 warning ("creating event pipe failed.");
5931 return previous;
5932 }
bd99dc85 5933
bd99dc85 5934 /* Register the event loop handler. */
cdc8e9b2 5935 add_file_handler (linux_event_pipe.event_fd (),
2554f6f5
SM
5936 handle_target_event, NULL,
5937 "linux-low");
bd99dc85
PA
5938
5939 /* Always trigger a linux_wait. */
5940 async_file_mark ();
5941 }
5942 else
5943 {
cdc8e9b2 5944 delete_file_handler (linux_event_pipe.event_fd ());
bd99dc85 5945
8674f082 5946 linux_event_pipe.close_pipe ();
bd99dc85
PA
5947 }
5948
21987b9c 5949 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
bd99dc85
PA
5950 }
5951
5952 return previous;
5953}
5954
0dc587d4
TBA
5955int
5956linux_process_target::start_non_stop (bool nonstop)
bd99dc85
PA
5957{
5958 /* Register or unregister from event-loop accordingly. */
0dc587d4 5959 target_async (nonstop);
aa96c426 5960
0dc587d4 5961 if (target_is_async_p () != (nonstop != false))
aa96c426
GB
5962 return -1;
5963
bd99dc85
PA
5964 return 0;
5965}
5966
652aef77
TBA
5967bool
5968linux_process_target::supports_multi_process ()
cf8fd78b 5969{
652aef77 5970 return true;
cf8fd78b
PA
5971}
5972
89245bc0
DB
5973/* Check if fork events are supported. */
5974
9690a72a
TBA
5975bool
5976linux_process_target::supports_fork_events ()
89245bc0 5977{
a2885186 5978 return true;
89245bc0
DB
5979}
5980
5981/* Check if vfork events are supported. */
5982
9690a72a
TBA
5983bool
5984linux_process_target::supports_vfork_events ()
89245bc0 5985{
a2885186 5986 return true;
89245bc0
DB
5987}
5988
393a6b59
PA
5989/* Return the set of supported thread options. */
5990
5991gdb_thread_options
5992linux_process_target::supported_thread_options ()
5993{
48989498 5994 return GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
393a6b59
PA
5995}
5996
94585166
DB
5997/* Check if exec events are supported. */
5998
9690a72a
TBA
5999bool
6000linux_process_target::supports_exec_events ()
94585166 6001{
a2885186 6002 return true;
94585166
DB
6003}
6004
de0d863e
DB
6005/* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6006 ptrace flags for all inferiors. This is in case the new GDB connection
6007 doesn't support the same set of events that the previous one did. */
6008
fb00dfce
TBA
6009void
6010linux_process_target::handle_new_gdb_connection ()
de0d863e 6011{
de0d863e 6012 /* Request that all the lwps reset their ptrace options. */
bbf550d5
SM
6013 for_each_thread ([] (thread_info *thread)
6014 {
6015 struct lwp_info *lwp = get_thread_lwp (thread);
6016
6017 if (!lwp->stopped)
6018 {
6019 /* Stop the lwp so we can modify its ptrace options. */
6020 lwp->must_set_ptrace_flags = 1;
6021 linux_stop_lwp (lwp);
6022 }
6023 else
6024 {
6025 /* Already stopped; go ahead and set the ptrace options. */
6026 struct process_info *proc = find_process_pid (pid_of (thread));
6027 int options = linux_low_ptrace_options (proc->attached);
6028
6029 linux_enable_event_reporting (lwpid_of (thread), options);
6030 lwp->must_set_ptrace_flags = 0;
6031 }
6032 });
de0d863e
DB
6033}
6034
55cf3021
TBA
6035int
6036linux_process_target::handle_monitor_command (char *mon)
6037{
6038#ifdef USE_THREAD_DB
6039 return thread_db_handle_monitor_command (mon);
6040#else
6041 return 0;
6042#endif
6043}
6044
95a45fc1
TBA
6045int
6046linux_process_target::core_of_thread (ptid_t ptid)
6047{
6048 return linux_common_core_of_thread (ptid);
6049}
6050
c756403b
TBA
6051bool
6052linux_process_target::supports_disable_randomization ()
03583c20 6053{
c756403b 6054 return true;
03583c20 6055}
efcbbd14 6056
c0245cb9
TBA
6057bool
6058linux_process_target::supports_agent ()
d1feda86 6059{
c0245cb9 6060 return true;
d1feda86
YQ
6061}
6062
2526e0cd
TBA
6063bool
6064linux_process_target::supports_range_stepping ()
c2d6af84 6065{
7582c77c 6066 if (supports_software_single_step ())
2526e0cd 6067 return true;
c2d6af84 6068
9cfd8715
TBA
6069 return low_supports_range_stepping ();
6070}
6071
6072bool
6073linux_process_target::low_supports_range_stepping ()
6074{
6075 return false;
c2d6af84
PA
6076}
6077
8247b823
TBA
6078bool
6079linux_process_target::supports_pid_to_exec_file ()
6080{
6081 return true;
6082}
6083
04977957 6084const char *
8247b823
TBA
6085linux_process_target::pid_to_exec_file (int pid)
6086{
6087 return linux_proc_pid_to_exec_file (pid);
6088}
6089
c9b7b804
TBA
6090bool
6091linux_process_target::supports_multifs ()
6092{
6093 return true;
6094}
6095
6096int
6097linux_process_target::multifs_open (int pid, const char *filename,
6098 int flags, mode_t mode)
6099{
6100 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6101}
6102
6103int
6104linux_process_target::multifs_unlink (int pid, const char *filename)
6105{
6106 return linux_mntns_unlink (pid, filename);
6107}
6108
6109ssize_t
6110linux_process_target::multifs_readlink (int pid, const char *filename,
6111 char *buf, size_t bufsiz)
6112{
6113 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6114}
6115
723b724b 6116#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
78d85199
YQ
6117struct target_loadseg
6118{
6119 /* Core address to which the segment is mapped. */
6120 Elf32_Addr addr;
6121 /* VMA recorded in the program header. */
6122 Elf32_Addr p_vaddr;
6123 /* Size of this segment in memory. */
6124 Elf32_Word p_memsz;
6125};
6126
723b724b 6127# if defined PT_GETDSBT
78d85199
YQ
6128struct target_loadmap
6129{
6130 /* Protocol version number, must be zero. */
6131 Elf32_Word version;
6132 /* Pointer to the DSBT table, its size, and the DSBT index. */
6133 unsigned *dsbt_table;
6134 unsigned dsbt_size, dsbt_index;
6135 /* Number of segments in this map. */
6136 Elf32_Word nsegs;
6137 /* The actual memory map. */
6138 struct target_loadseg segs[/*nsegs*/];
6139};
723b724b
MF
6140# define LINUX_LOADMAP PT_GETDSBT
6141# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6142# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6143# else
6144struct target_loadmap
6145{
6146 /* Protocol version number, must be zero. */
6147 Elf32_Half version;
6148 /* Number of segments in this map. */
6149 Elf32_Half nsegs;
6150 /* The actual memory map. */
6151 struct target_loadseg segs[/*nsegs*/];
6152};
6153# define LINUX_LOADMAP PTRACE_GETFDPIC
6154# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6155# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6156# endif
78d85199 6157
9da41fda
TBA
6158bool
6159linux_process_target::supports_read_loadmap ()
6160{
6161 return true;
6162}
6163
6164int
6165linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6166 unsigned char *myaddr, unsigned int len)
78d85199 6167{
0bfdf32f 6168 int pid = lwpid_of (current_thread);
78d85199
YQ
6169 int addr = -1;
6170 struct target_loadmap *data = NULL;
6171 unsigned int actual_length, copy_length;
6172
6173 if (strcmp (annex, "exec") == 0)
723b724b 6174 addr = (int) LINUX_LOADMAP_EXEC;
78d85199 6175 else if (strcmp (annex, "interp") == 0)
723b724b 6176 addr = (int) LINUX_LOADMAP_INTERP;
78d85199
YQ
6177 else
6178 return -1;
6179
723b724b 6180 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
78d85199
YQ
6181 return -1;
6182
6183 if (data == NULL)
6184 return -1;
6185
6186 actual_length = sizeof (struct target_loadmap)
6187 + sizeof (struct target_loadseg) * data->nsegs;
6188
6189 if (offset < 0 || offset > actual_length)
6190 return -1;
6191
6192 copy_length = actual_length - offset < len ? actual_length - offset : len;
6193 memcpy (myaddr, (char *) data + offset, copy_length);
6194 return copy_length;
6195}
723b724b 6196#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6197
bc8d3ae4
TBA
6198bool
6199linux_process_target::supports_catch_syscall ()
82075af2 6200{
a2885186 6201 return low_supports_catch_syscall ();
82075af2
JS
6202}
6203
9eedd27d
TBA
6204bool
6205linux_process_target::low_supports_catch_syscall ()
6206{
6207 return false;
6208}
6209
770d8f6a
TBA
6210CORE_ADDR
6211linux_process_target::read_pc (regcache *regcache)
219f2f23 6212{
bf9ae9d8 6213 if (!low_supports_breakpoints ())
219f2f23
PA
6214 return 0;
6215
bf9ae9d8 6216 return low_get_pc (regcache);
219f2f23
PA
6217}
6218
770d8f6a
TBA
6219void
6220linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
219f2f23 6221{
bf9ae9d8 6222 gdb_assert (low_supports_breakpoints ());
219f2f23 6223
bf9ae9d8 6224 low_set_pc (regcache, pc);
219f2f23
PA
6225}
6226
68119632
TBA
6227bool
6228linux_process_target::supports_thread_stopped ()
6229{
6230 return true;
6231}
6232
6233bool
6234linux_process_target::thread_stopped (thread_info *thread)
8336d594
PA
6235{
6236 return get_thread_lwp (thread)->stopped;
6237}
6238
ef980d65
PA
6239bool
6240linux_process_target::any_resumed ()
6241{
6242 bool any_resumed;
6243
6244 auto status_pending_p_any = [&] (thread_info *thread)
6245 {
6246 return status_pending_p_callback (thread, minus_one_ptid);
6247 };
6248
6249 auto not_stopped = [&] (thread_info *thread)
6250 {
6251 return not_stopped_callback (thread, minus_one_ptid);
6252 };
6253
6254 /* Find a resumed LWP, if any. */
6255 if (find_thread (status_pending_p_any) != NULL)
6256 any_resumed = 1;
6257 else if (find_thread (not_stopped) != NULL)
6258 any_resumed = 1;
6259 else
6260 any_resumed = 0;
6261
6262 return any_resumed;
6263}
6264
8336d594
PA
6265/* This exposes stop-all-threads functionality to other modules. */
6266
29e8dc09
TBA
6267void
6268linux_process_target::pause_all (bool freeze)
8336d594 6269{
7984d532
PA
6270 stop_all_lwps (freeze, NULL);
6271}
6272
6273/* This exposes unstop-all-threads functionality to other gdbserver
6274 modules. */
6275
29e8dc09
TBA
6276void
6277linux_process_target::unpause_all (bool unfreeze)
7984d532
PA
6278{
6279 unstop_all_lwps (unfreeze, NULL);
8336d594
PA
6280}
6281
2268b414
JK
6282/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6283
6284static int
6285get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6286 CORE_ADDR *phdr_memaddr, int *num_phdr)
6287{
6288 char filename[PATH_MAX];
6289 int fd;
6290 const int auxv_size = is_elf64
6291 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6292 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6293
6294 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6295
6296 fd = open (filename, O_RDONLY);
6297 if (fd < 0)
6298 return 1;
6299
6300 *phdr_memaddr = 0;
6301 *num_phdr = 0;
6302 while (read (fd, buf, auxv_size) == auxv_size
6303 && (*phdr_memaddr == 0 || *num_phdr == 0))
6304 {
6305 if (is_elf64)
6306 {
6307 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6308
6309 switch (aux->a_type)
6310 {
6311 case AT_PHDR:
6312 *phdr_memaddr = aux->a_un.a_val;
6313 break;
6314 case AT_PHNUM:
6315 *num_phdr = aux->a_un.a_val;
6316 break;
6317 }
6318 }
6319 else
6320 {
6321 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6322
6323 switch (aux->a_type)
6324 {
6325 case AT_PHDR:
6326 *phdr_memaddr = aux->a_un.a_val;
6327 break;
6328 case AT_PHNUM:
6329 *num_phdr = aux->a_un.a_val;
6330 break;
6331 }
6332 }
6333 }
6334
6335 close (fd);
6336
6337 if (*phdr_memaddr == 0 || *num_phdr == 0)
6338 {
6339 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6340 "phdr_memaddr = %ld, phdr_num = %d",
6341 (long) *phdr_memaddr, *num_phdr);
6342 return 2;
6343 }
6344
6345 return 0;
6346}
6347
6348/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6349
6350static CORE_ADDR
6351get_dynamic (const int pid, const int is_elf64)
6352{
6353 CORE_ADDR phdr_memaddr, relocation;
db1ff28b 6354 int num_phdr, i;
2268b414 6355 unsigned char *phdr_buf;
db1ff28b 6356 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
2268b414
JK
6357
6358 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6359 return 0;
6360
6361 gdb_assert (num_phdr < 100); /* Basic sanity check. */
224c3ddb 6362 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
2268b414
JK
6363
6364 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6365 return 0;
6366
6367 /* Compute relocation: it is expected to be 0 for "regular" executables,
6368 non-zero for PIE ones. */
6369 relocation = -1;
db1ff28b
JK
6370 for (i = 0; relocation == -1 && i < num_phdr; i++)
6371 if (is_elf64)
6372 {
6373 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6374
6375 if (p->p_type == PT_PHDR)
6376 relocation = phdr_memaddr - p->p_vaddr;
6377 }
6378 else
6379 {
6380 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6381
6382 if (p->p_type == PT_PHDR)
6383 relocation = phdr_memaddr - p->p_vaddr;
6384 }
6385
2268b414
JK
6386 if (relocation == -1)
6387 {
e237a7e2
JK
6388 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6389 any real world executables, including PIE executables, have always
6390 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6391 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6392 or present DT_DEBUG anyway (fpc binaries are statically linked).
6393
6394 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6395
6396 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6397
2268b414
JK
6398 return 0;
6399 }
6400
db1ff28b
JK
6401 for (i = 0; i < num_phdr; i++)
6402 {
6403 if (is_elf64)
6404 {
6405 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6406
6407 if (p->p_type == PT_DYNAMIC)
6408 return p->p_vaddr + relocation;
6409 }
6410 else
6411 {
6412 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
2268b414 6413
db1ff28b
JK
6414 if (p->p_type == PT_DYNAMIC)
6415 return p->p_vaddr + relocation;
6416 }
6417 }
2268b414
JK
6418
6419 return 0;
6420}
6421
6422/* Return &_r_debug in the inferior, or -1 if not present. Return value
367ba2c2
MR
6423 can be 0 if the inferior does not yet have the library list initialized.
6424 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6425 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
2268b414
JK
6426
6427static CORE_ADDR
6428get_r_debug (const int pid, const int is_elf64)
6429{
6430 CORE_ADDR dynamic_memaddr;
6431 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6432 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
367ba2c2 6433 CORE_ADDR map = -1;
2268b414
JK
6434
6435 dynamic_memaddr = get_dynamic (pid, is_elf64);
6436 if (dynamic_memaddr == 0)
367ba2c2 6437 return map;
2268b414
JK
6438
6439 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6440 {
6441 if (is_elf64)
6442 {
6443 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
a738da3a 6444#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
367ba2c2
MR
6445 union
6446 {
6447 Elf64_Xword map;
6448 unsigned char buf[sizeof (Elf64_Xword)];
6449 }
6450 rld_map;
a738da3a
MF
6451#endif
6452#ifdef DT_MIPS_RLD_MAP
367ba2c2
MR
6453 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6454 {
6455 if (linux_read_memory (dyn->d_un.d_val,
6456 rld_map.buf, sizeof (rld_map.buf)) == 0)
6457 return rld_map.map;
6458 else
6459 break;
6460 }
75f62ce7 6461#endif /* DT_MIPS_RLD_MAP */
a738da3a
MF
6462#ifdef DT_MIPS_RLD_MAP_REL
6463 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6464 {
6465 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6466 rld_map.buf, sizeof (rld_map.buf)) == 0)
6467 return rld_map.map;
6468 else
6469 break;
6470 }
6471#endif /* DT_MIPS_RLD_MAP_REL */
2268b414 6472
367ba2c2
MR
6473 if (dyn->d_tag == DT_DEBUG && map == -1)
6474 map = dyn->d_un.d_val;
2268b414
JK
6475
6476 if (dyn->d_tag == DT_NULL)
6477 break;
6478 }
6479 else
6480 {
6481 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
a738da3a 6482#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
367ba2c2
MR
6483 union
6484 {
6485 Elf32_Word map;
6486 unsigned char buf[sizeof (Elf32_Word)];
6487 }
6488 rld_map;
a738da3a
MF
6489#endif
6490#ifdef DT_MIPS_RLD_MAP
367ba2c2
MR
6491 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6492 {
6493 if (linux_read_memory (dyn->d_un.d_val,
6494 rld_map.buf, sizeof (rld_map.buf)) == 0)
6495 return rld_map.map;
6496 else
6497 break;
6498 }
75f62ce7 6499#endif /* DT_MIPS_RLD_MAP */
a738da3a
MF
6500#ifdef DT_MIPS_RLD_MAP_REL
6501 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6502 {
6503 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6504 rld_map.buf, sizeof (rld_map.buf)) == 0)
6505 return rld_map.map;
6506 else
6507 break;
6508 }
6509#endif /* DT_MIPS_RLD_MAP_REL */
2268b414 6510
367ba2c2
MR
6511 if (dyn->d_tag == DT_DEBUG && map == -1)
6512 map = dyn->d_un.d_val;
2268b414
JK
6513
6514 if (dyn->d_tag == DT_NULL)
6515 break;
6516 }
6517
6518 dynamic_memaddr += dyn_size;
6519 }
6520
367ba2c2 6521 return map;
2268b414
JK
6522}
6523
6524/* Read one pointer from MEMADDR in the inferior. */
6525
6526static int
6527read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6528{
485f1ee4
PA
6529 int ret;
6530
6531 /* Go through a union so this works on either big or little endian
6532 hosts, when the inferior's pointer size is smaller than the size
6533 of CORE_ADDR. It is assumed the inferior's endianness is the
6534 same of the superior's. */
6535 union
6536 {
6537 CORE_ADDR core_addr;
6538 unsigned int ui;
6539 unsigned char uc;
6540 } addr;
6541
6542 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6543 if (ret == 0)
6544 {
6545 if (ptr_size == sizeof (CORE_ADDR))
6546 *ptr = addr.core_addr;
6547 else if (ptr_size == sizeof (unsigned int))
6548 *ptr = addr.ui;
6549 else
6550 gdb_assert_not_reached ("unhandled pointer size");
6551 }
6552 return ret;
2268b414
JK
6553}
6554
974387bb
TBA
6555bool
6556linux_process_target::supports_qxfer_libraries_svr4 ()
6557{
6558 return true;
6559}
6560
2268b414
JK
6561struct link_map_offsets
6562 {
6563 /* Offset and size of r_debug.r_version. */
6564 int r_version_offset;
6565
6566 /* Offset and size of r_debug.r_map. */
6567 int r_map_offset;
6568
8d56636a
MM
6569 /* Offset of r_debug_extended.r_next. */
6570 int r_next_offset;
6571
2268b414
JK
6572 /* Offset to l_addr field in struct link_map. */
6573 int l_addr_offset;
6574
6575 /* Offset to l_name field in struct link_map. */
6576 int l_name_offset;
6577
6578 /* Offset to l_ld field in struct link_map. */
6579 int l_ld_offset;
6580
6581 /* Offset to l_next field in struct link_map. */
6582 int l_next_offset;
6583
6584 /* Offset to l_prev field in struct link_map. */
6585 int l_prev_offset;
6586 };
6587
8d56636a
MM
/* link_map_offsets for a 32-bit (ILP32) inferior.  */

static const link_map_offsets lmo_32bit_offsets =
  {
    0, /* r_version offset.  */
    4, /* r_debug.r_map offset.  */
    20, /* r_debug_extended.r_next.  */
    0, /* l_addr offset in link_map.  */
    4, /* l_name offset in link_map.  */
    8, /* l_ld offset in link_map.  */
    12, /* l_next offset in link_map.  */
    16 /* l_prev offset in link_map.  */
  };

/* link_map_offsets for a 64-bit (LP64) inferior.  */

static const link_map_offsets lmo_64bit_offsets =
  {
    0, /* r_version offset.  */
    8, /* r_debug.r_map offset.  */
    40, /* r_debug_extended.r_next.  */
    0, /* l_addr offset in link_map.  */
    8, /* l_name offset in link_map.  */
    16, /* l_ld offset in link_map.  */
    24, /* l_next offset in link_map.  */
    32 /* l_prev offset in link_map.  */
  };
6611
/* Get the loaded shared libraries from one namespace.

   Walks the link_map list starting at LM_ADDR, appending one <library>
   XML element per entry to DOCUMENT.  LMID identifies the namespace and
   is emitted verbatim in each element.  LM_PREV is the expected l_prev
   of the first entry; a mismatch is treated as list corruption and
   stops the walk.  PTR_SIZE and LMO describe the inferior's pointer
   size and struct layout.  */

static void
read_link_map (std::string &document, CORE_ADDR lmid, CORE_ADDR lm_addr,
	       CORE_ADDR lm_prev, int ptr_size, const link_map_offsets *lmo)
{
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;

  /* Stop at the end of the list, or as soon as any member read
     fails.  */
  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      /* Sanity check: the entry's back-pointer must match where we
	 came from.  */
      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%s != 0x%s",
		   paddress (lm_prev), paddress (l_prev));
	  break;
	}

      /* Not checking for error because reading may stop before we've got
	 PATH_MAX worth of characters.  */
      libname[0] = '\0';
      linux_read_memory (l_name, libname, sizeof (libname) - 1);
      libname[sizeof (libname) - 1] = '\0';
      /* Entries with an empty name (e.g. the main executable in some
	 layouts) are not reported.  */
      if (libname[0] != '\0')
	{
	  string_appendf (document, "<library name=\"");
	  xml_escape_text_append (document, (char *) libname);
	  string_appendf (document, "\" lm=\"0x%s\" l_addr=\"0x%s\" "
			  "l_ld=\"0x%s\" lmid=\"0x%s\"/>",
			  paddress (lm_addr), paddress (l_addr),
			  paddress (l_ld), paddress (lmid));
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }
}
6660
/* Construct qXfer:libraries-svr4:read reply.

   ANNEX may carry "lmid=", "start=" and "prev=" hex-encoded address
   arguments, each terminated by ';'; unknown names are skipped.  At
   most LEN bytes of the generated XML document, starting at OFFSET,
   are copied into READBUF.  Returns the number of bytes copied, -1 on
   error, or -2 when WRITEBUF is non-NULL (writing is not
   supported).  */

int
linux_process_target::qxfer_libraries_svr4 (const char *annex,
					    unsigned char *readbuf,
					    unsigned const char *writebuf,
					    CORE_ADDR offset, int len)
{
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;
  unsigned int machine;
  CORE_ADDR lmid = 0, lm_addr = 0, lm_prev = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Determine pointer size / struct layout from the inferior's ELF
     class.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  const link_map_offsets *lmo;
  int ptr_size;
  if (is_elf64)
    {
      lmo = &lmo_64bit_offsets;
      ptr_size = 8;
    }
  else
    {
      lmo = &lmo_32bit_offsets;
      ptr_size = 4;
    }

  /* Parse "name=hexaddr;" pairs out of the annex.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int name_len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      name_len = sep - annex;
      if (name_len == 4 && startswith (annex, "lmid"))
	addrp = &lmid;
      else if (name_len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (name_len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  /* Unknown argument; skip to the next ';'-separated pair.  */
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  /* Note the header is left unterminated; ">" or " main-lm=...>" is
     appended below once we know whether main-lm applies.  */
  std::string document = "<library-list-svr4 version=\"1.0\"";

  /* When the starting LM_ADDR is passed in the annex, only traverse that
     namespace, which is assumed to be identified by LMID.

     Otherwise, start with R_DEBUG and traverse all namespaces we find.  */
  if (lm_addr != 0)
    {
      document += ">";
      read_link_map (document, lmid, lm_addr, lm_prev, ptr_size, lmo);
    }
  else
    {
      if (lm_prev != 0)
	warning ("ignoring prev=0x%s without start", paddress (lm_prev));

      /* We could interpret LMID as 'provide only the libraries for this
	 namespace' but GDB is currently only providing lmid, start, and
	 prev, or nothing.  */
      if (lmid != 0)
	warning ("ignoring lmid=0x%s without start", paddress (lmid));

      /* The r_debug address is cached per-process.  */
      CORE_ADDR r_debug = priv->r_debug;
      if (r_debug == 0)
	r_debug = priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (r_debug == (CORE_ADDR) -1)
	return -1;

      /* Terminate the header if we end up with an empty list.  */
      if (r_debug == 0)
	document += ">";

      /* Walk the chain of r_debug_extended structures, one per
	 namespace.  */
      while (r_debug != 0)
	{
	  int r_version = 0;
	  if (linux_read_memory (r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0)
	    {
	      warning ("unable to read r_version from 0x%s",
		       paddress (r_debug + lmo->r_version_offset));
	      break;
	    }

	  if (r_version < 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	      break;
	    }

	  if (read_one_ptr (r_debug + lmo->r_map_offset, &lm_addr,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%s",
		       paddress (r_debug + lmo->r_map_offset));
	      break;
	    }

	  /* We read the entire namespace.  */
	  lm_prev = 0;

	  /* The first entry corresponds to the main executable unless the
	     dynamic loader was loaded late by a static executable.  But
	     in such case the main executable does not have PT_DYNAMIC
	     present and we would not have gotten here.  */
	  if (r_debug == priv->r_debug)
	    {
	      if (lm_addr != 0)
		string_appendf (document, " main-lm=\"0x%s\">",
				paddress (lm_addr));
	      else
		document += ">";

	      lm_prev = lm_addr;
	      if (read_one_ptr (lm_addr + lmo->l_next_offset,
				&lm_addr, ptr_size) != 0)
		{
		  warning ("unable to read l_next from 0x%s",
			   paddress (lm_addr + lmo->l_next_offset));
		  break;
		}
	    }

	  read_link_map (document, r_debug, lm_addr, lm_prev, ptr_size, lmo);

	  /* r_next only exists in r_debug_extended (version >= 2).  */
	  if (r_version < 2)
	    break;

	  if (read_one_ptr (r_debug + lmo->r_next_offset, &r_debug,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_next from 0x%s",
		       paddress (r_debug + lmo->r_next_offset));
	      break;
	    }
	}
    }

  document += "</library-list-svr4>";

  /* Return the requested [OFFSET, OFFSET+LEN) window of the
     document.  */
  int document_len = document.length ();
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document.data () + offset, len);

  return len;
}
6841
9accd112
MM
6842#ifdef HAVE_LINUX_BTRACE
6843
8263b346
TBA
/* Branch tracing is supported when built with HAVE_LINUX_BTRACE.  */

bool
linux_process_target::supports_btrace ()
{
  return true;
}
6849
/* See to_enable_btrace target method.  Delegates to the common
   linux_enable_btrace, keyed by the thread's ptid.  */

btrace_target_info *
linux_process_target::enable_btrace (thread_info *tp,
				     const btrace_config *conf)
{
  return linux_enable_btrace (tp->id, conf);
}
6856
969c39fb 6857/* See to_disable_btrace target method. */
9accd112 6858
79597bdd
TBA
6859int
6860linux_process_target::disable_btrace (btrace_target_info *tinfo)
969c39fb
MM
6861{
6862 enum btrace_error err;
6863
6864 err = linux_disable_btrace (tinfo);
6865 return (err == BTRACE_ERR_NONE ? 0 : -1);
6866}
6867
bc504a31 6868/* Encode an Intel Processor Trace configuration. */
b20a6524
MM
6869
6870static void
873a185b 6871linux_low_encode_pt_config (std::string *buffer,
b20a6524
MM
6872 const struct btrace_data_pt_config *config)
6873{
873a185b 6874 *buffer += "<pt-config>\n";
b20a6524
MM
6875
6876 switch (config->cpu.vendor)
6877 {
6878 case CV_INTEL:
873a185b
TT
6879 string_xml_appendf (*buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6880 "model=\"%u\" stepping=\"%u\"/>\n",
6881 config->cpu.family, config->cpu.model,
6882 config->cpu.stepping);
b20a6524
MM
6883 break;
6884
6885 default:
6886 break;
6887 }
6888
873a185b 6889 *buffer += "</pt-config>\n";
b20a6524
MM
6890}
6891
6892/* Encode a raw buffer. */
6893
6894static void
873a185b 6895linux_low_encode_raw (std::string *buffer, const gdb_byte *data,
b20a6524
MM
6896 unsigned int size)
6897{
6898 if (size == 0)
6899 return;
6900
268a13a5 6901 /* We use hex encoding - see gdbsupport/rsp-low.h. */
873a185b 6902 *buffer += "<raw>\n";
b20a6524
MM
6903
6904 while (size-- > 0)
6905 {
6906 char elem[2];
6907
6908 elem[0] = tohex ((*data >> 4) & 0xf);
6909 elem[1] = tohex (*data++ & 0xf);
6910
8b2d5ef8 6911 buffer->append (elem, 2);
b20a6524
MM
6912 }
6913
873a185b 6914 *buffer += "</raw>\n";
b20a6524
MM
6915}
6916
969c39fb
MM
/* See to_read_btrace target method.

   Appends the trace for TINFO to *BUFFER as XML, or an "E.<reason>."
   error marker on failure.  Returns 0 on success, -1 on error.  */

int
linux_process_target::read_btrace (btrace_target_info *tinfo,
				   std::string *buffer,
				   enum btrace_read_type type)
{
  struct btrace_data btrace;
  enum btrace_error err;

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      /* Report the failure as an E. marker the remote side can
	 display.  */
      if (err == BTRACE_ERR_OVERFLOW)
	*buffer += "E.Overflow.";
      else
	*buffer += "E.Generic Error.";

      return -1;
    }

  /* Encode the trace data in the format-specific XML schema.  */
  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      *buffer += "E.No Trace.";
      return -1;

    case BTRACE_FORMAT_BTS:
      *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
      *buffer += "<btrace version=\"1.0\">\n";

      /* One <block> element per recorded branch-trace-store block.  */
      for (const btrace_block &block : *btrace.variant.bts.blocks)
	string_xml_appendf (*buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			    paddress (block.begin), paddress (block.end));

      *buffer += "</btrace>\n";
      break;

    case BTRACE_FORMAT_PT:
      *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
      *buffer += "<btrace version=\"1.0\">\n";
      *buffer += "<pt>\n";

      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      *buffer += "</pt>\n";
      *buffer += "</btrace>\n";
      break;

    default:
      *buffer += "E.Unsupported Trace Format.";
      return -1;
    }

  return 0;
}
f4abbc16
MM
6976
6977/* See to_btrace_conf target method. */
6978
79597bdd
TBA
6979int
6980linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
873a185b 6981 std::string *buffer)
f4abbc16
MM
6982{
6983 const struct btrace_config *conf;
6984
873a185b
TT
6985 *buffer += "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n";
6986 *buffer += "<btrace-conf version=\"1.0\">\n";
f4abbc16
MM
6987
6988 conf = linux_btrace_conf (tinfo);
6989 if (conf != NULL)
6990 {
6991 switch (conf->format)
6992 {
6993 case BTRACE_FORMAT_NONE:
6994 break;
6995
6996 case BTRACE_FORMAT_BTS:
873a185b
TT
6997 string_xml_appendf (*buffer, "<bts");
6998 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->bts.size);
6999 string_xml_appendf (*buffer, " />\n");
f4abbc16 7000 break;
b20a6524
MM
7001
7002 case BTRACE_FORMAT_PT:
873a185b
TT
7003 string_xml_appendf (*buffer, "<pt");
7004 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->pt.size);
7005 string_xml_appendf (*buffer, "/>\n");
b20a6524 7006 break;
f4abbc16
MM
7007 }
7008 }
7009
873a185b 7010 *buffer += "</btrace-conf>\n";
f4abbc16
MM
7011 return 0;
7012}
9accd112
MM
7013#endif /* HAVE_LINUX_BTRACE */
7014
7b669087
GB
/* See nat/linux-nat.h.  In gdbserver the current LWP is the one
   backing current_thread.  */

ptid_t
current_lwp_ptid (void)
{
  return ptid_of (current_thread);
}
7022
07b3255c 7023/* A helper function that copies NAME to DEST, replacing non-printable
862180a2
TT
7024 characters with '?'. Returns the original DEST as a
7025 convenience. */
07b3255c
TT
7026
7027static const char *
7028replace_non_ascii (char *dest, const char *name)
7029{
862180a2 7030 const char *result = dest;
07b3255c
TT
7031 while (*name != '\0')
7032 {
7033 if (!ISPRINT (*name))
7034 *dest++ = '?';
7035 else
7036 *dest++ = *name;
7037 ++name;
7038 }
862180a2
TT
7039 *dest = '\0';
7040 return result;
07b3255c
TT
7041}
7042
7f63b89b
TBA
/* Return THREAD's name converted to UTF-8, or nullptr if it has no
   name.  The result points into a static buffer valid until the next
   call.  */

const char *
linux_process_target::thread_name (ptid_t thread)
{
  static char dest[100];

  const char *name = linux_proc_tid_get_name (thread);
  if (name == nullptr)
    return nullptr;

  /* Linux limits the comm file to 16 bytes (including the trailing
     \0.  If the program or thread name is set when using a multi-byte
     encoding, this might cause it to be truncated mid-character.  In
     this situation, sending the truncated form in an XML <thread>
     response will cause a parse error in gdb.  So, instead convert
     from the locale's encoding (we can't be sure this is the correct
     encoding, but it's as good a guess as we have) to UTF-8, but in a
     way that ignores any encoding errors.  See PR remote/30618.  */
  const char *cset = nl_langinfo (CODESET);
  iconv_t handle = iconv_open ("UTF-8//IGNORE", cset);
  if (handle == (iconv_t) -1)
    /* No converter available; fall back to replacing non-printable
       characters.  */
    return replace_non_ascii (dest, name);

  size_t inbytes = strlen (name);
  char *inbuf = const_cast<char *> (name);
  size_t outbytes = sizeof (dest);
  char *outbuf = dest;
  size_t result = iconv (handle, &inbuf, &inbytes, &outbuf, &outbytes);

  if (result == (size_t) -1)
    {
      if (errno == E2BIG)
	/* Output buffer full; truncate, leaving room for the NUL.  */
	outbuf = &dest[sizeof (dest) - 1];
      else if ((errno == EILSEQ || errno == EINVAL)
	       && outbuf < &dest[sizeof (dest) - 2])
	/* Invalid or incomplete sequence; mark it with a '?'.  */
	*outbuf++ = '?';
    }
  *outbuf = '\0';

  iconv_close (handle);
  return *dest == '\0' ? nullptr : dest;
}
7084
7085#if USE_THREAD_DB
/* Fetch PTID's thread handle via the thread_db layer; returns true on
   success with *HANDLE/*HANDLE_LEN filled in.  */

bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  return thread_db_thread_handle (ptid, handle, handle_len);
}
7092#endif
7093
7b961964
SM
7094thread_info *
7095linux_process_target::thread_pending_parent (thread_info *thread)
7096{
7097 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
7098
7099 if (parent == nullptr)
7100 return nullptr;
7101
7102 return get_lwp_thread (parent);
7103}
7104
df5ad102 7105thread_info *
faf44a31
PA
7106linux_process_target::thread_pending_child (thread_info *thread,
7107 target_waitkind *kind)
df5ad102 7108{
faf44a31 7109 lwp_info *child = get_thread_lwp (thread)->pending_child (kind);
df5ad102
SM
7110
7111 if (child == nullptr)
7112 return nullptr;
7113
7114 return get_lwp_thread (child);
7115}
7116
276d4552
YQ
7117/* Default implementation of linux_target_ops method "set_pc" for
7118 32-bit pc register which is literally named "pc". */
7119
7120void
7121linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7122{
7123 uint32_t newpc = pc;
7124
7125 supply_register_by_name (regcache, "pc", &newpc);
7126}
7127
7128/* Default implementation of linux_target_ops method "get_pc" for
7129 32-bit pc register which is literally named "pc". */
7130
7131CORE_ADDR
7132linux_get_pc_32bit (struct regcache *regcache)
7133{
7134 uint32_t pc;
7135
7136 collect_register_by_name (regcache, "pc", &pc);
c058728c 7137 threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
276d4552
YQ
7138 return pc;
7139}
7140
6f69e520
YQ
7141/* Default implementation of linux_target_ops method "set_pc" for
7142 64-bit pc register which is literally named "pc". */
7143
7144void
7145linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7146{
7147 uint64_t newpc = pc;
7148
7149 supply_register_by_name (regcache, "pc", &newpc);
7150}
7151
7152/* Default implementation of linux_target_ops method "get_pc" for
7153 64-bit pc register which is literally named "pc". */
7154
7155CORE_ADDR
7156linux_get_pc_64bit (struct regcache *regcache)
7157{
7158 uint64_t pc;
7159
7160 collect_register_by_name (regcache, "pc", &pc);
c058728c 7161 threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
6f69e520
YQ
7162 return pc;
7163}
7164
/* See linux-low.h.

   Scan the inferior's auxv, WORDSIZE bytes per entry field, for an
   entry whose type equals MATCH.  On success store its value in *VALP
   and return 1; return 0 if no such entry is found.  */

int
linux_get_auxv (int pid, int wordsize, CORE_ADDR match, CORE_ADDR *valp)
{
  /* Each auxv entry is a (type, value) pair of WORDSIZE-byte
     words.  */
  gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
  int offset = 0;

  gdb_assert (wordsize == 4 || wordsize == 8);

  while (the_target->read_auxv (pid, offset, data, 2 * wordsize)
	 == 2 * wordsize)
    {
      if (wordsize == 4)
	{
	  uint32_t *data_p = (uint32_t *) data;
	  if (data_p[0] == match)
	    {
	      *valp = data_p[1];
	      return 1;
	    }
	}
      else
	{
	  uint64_t *data_p = (uint64_t *) data;
	  if (data_p[0] == match)
	    {
	      *valp = data_p[1];
	      return 1;
	    }
	}

      offset += 2 * wordsize;
    }

  return 0;
}
7202
7203/* See linux-low.h. */
7204
7205CORE_ADDR
43e5fbd8 7206linux_get_hwcap (int pid, int wordsize)
974c89e0 7207{
0570503d 7208 CORE_ADDR hwcap = 0;
43e5fbd8 7209 linux_get_auxv (pid, wordsize, AT_HWCAP, &hwcap);
0570503d 7210 return hwcap;
974c89e0
AH
7211}
7212
7213/* See linux-low.h. */
7214
7215CORE_ADDR
43e5fbd8 7216linux_get_hwcap2 (int pid, int wordsize)
974c89e0 7217{
0570503d 7218 CORE_ADDR hwcap2 = 0;
43e5fbd8 7219 linux_get_auxv (pid, wordsize, AT_HWCAP2, &hwcap2);
0570503d 7220 return hwcap2;
974c89e0 7221}
6f69e520 7222
3aee8918
PA
7223#ifdef HAVE_LINUX_REGSETS
7224void
7225initialize_regsets_info (struct regsets_info *info)
7226{
7227 for (info->num_regsets = 0;
7228 info->regsets[info->num_regsets].size >= 0;
7229 info->num_regsets++)
7230 ;
3aee8918
PA
7231}
7232#endif
7233
da6d8c04
DJ
/* One-time initialization of the Linux low target: install the target
   vector, set up warnings, install the SIGCHLD handler, and perform
   architecture-specific and ptrace-feature setup.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (the_linux_target);

  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();

  /* SA_RESTART so that interrupted system calls are restarted rather
     than failing with EINTR when a child changes state.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  linux_check_ptrace_features ();
}