]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdbserver/linux-low.cc
Use unrelocated_addr in dwarf2_base_index_functions::find_per_cu
[thirdparty/binutils-gdb.git] / gdbserver / linux-low.cc
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
213516ef 2 Copyright (C) 1995-2023 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
268a13a5 22#include "gdbsupport/agent.h"
de0d863e 23#include "tdesc.h"
cdc8e9b2
JB
24#include "gdbsupport/event-loop.h"
25#include "gdbsupport/event-pipe.h"
268a13a5
TT
26#include "gdbsupport/rsp-low.h"
27#include "gdbsupport/signals-state-save-restore.h"
96d7229d
LM
28#include "nat/linux-nat.h"
29#include "nat/linux-waitpid.h"
268a13a5 30#include "gdbsupport/gdb_wait.h"
5826e159 31#include "nat/gdb_ptrace.h"
125f8a3d
GB
32#include "nat/linux-ptrace.h"
33#include "nat/linux-procfs.h"
8cc73a39 34#include "nat/linux-personality.h"
da6d8c04
DJ
35#include <signal.h>
36#include <sys/ioctl.h>
37#include <fcntl.h>
0a30fbc4 38#include <unistd.h>
fd500816 39#include <sys/syscall.h>
f9387fc3 40#include <sched.h>
07e059b5
VP
41#include <pwd.h>
42#include <sys/types.h>
43#include <dirent.h>
53ce3c39 44#include <sys/stat.h>
efcbbd14 45#include <sys/vfs.h>
1570b33e 46#include <sys/uio.h>
07b3255c
TT
47#include <langinfo.h>
48#include <iconv.h>
268a13a5 49#include "gdbsupport/filestuff.h"
07b3255c 50#include "gdbsupport/gdb-safe-ctype.h"
c144c7a0 51#include "tracepoint.h"
276d4552 52#include <inttypes.h>
268a13a5 53#include "gdbsupport/common-inferior.h"
2090129c 54#include "nat/fork-inferior.h"
268a13a5 55#include "gdbsupport/environ.h"
21987b9c 56#include "gdbsupport/gdb-sigmask.h"
268a13a5 57#include "gdbsupport/scoped_restore.h"
957f3f49
DE
58#ifndef ELFMAG0
59/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
60 then ELFMAG0 will have been defined. If it didn't get included by
61 gdb_proc_service.h then including it will likely introduce a duplicate
62 definition of elf_fpregset_t. */
63#include <elf.h>
64#endif
14d2069a 65#include "nat/linux-namespaces.h"
efcbbd14 66
fd462a61
DJ
67#ifndef O_LARGEFILE
68#define O_LARGEFILE 0
69#endif
1a981360 70
69f4c9cc
AH
71#ifndef AT_HWCAP2
72#define AT_HWCAP2 26
73#endif
74
db0dfaa0
LM
75/* Some targets did not define these ptrace constants from the start,
76 so gdbserver defines them locally here. In the future, these may
77 be removed after they are added to asm/ptrace.h. */
78#if !(defined(PT_TEXT_ADDR) \
79 || defined(PT_DATA_ADDR) \
80 || defined(PT_TEXT_END_ADDR))
81#if defined(__mcoldfire__)
82/* These are still undefined in 3.10 kernels. */
83#define PT_TEXT_ADDR 49*4
84#define PT_DATA_ADDR 50*4
85#define PT_TEXT_END_ADDR 51*4
db0dfaa0
LM
86/* These are still undefined in 3.10 kernels. */
87#elif defined(__TMS320C6X__)
88#define PT_TEXT_ADDR (0x10000*4)
89#define PT_DATA_ADDR (0x10004*4)
90#define PT_TEXT_END_ADDR (0x10008*4)
91#endif
92#endif
93
5203ae1e
TBA
94#if (defined(__UCLIBC__) \
95 && defined(HAS_NOMMU) \
96 && defined(PT_TEXT_ADDR) \
97 && defined(PT_DATA_ADDR) \
98 && defined(PT_TEXT_END_ADDR))
99#define SUPPORTS_READ_OFFSETS
100#endif
101
9accd112 102#ifdef HAVE_LINUX_BTRACE
125f8a3d 103# include "nat/linux-btrace.h"
268a13a5 104# include "gdbsupport/btrace-common.h"
9accd112
MM
105#endif
106
8365dcf5
TJB
107#ifndef HAVE_ELF32_AUXV_T
108/* Copied from glibc's elf.h. */
109typedef struct
110{
111 uint32_t a_type; /* Entry type */
112 union
113 {
114 uint32_t a_val; /* Integer value */
115 /* We use to have pointer elements added here. We cannot do that,
116 though, since it does not work when using 32-bit definitions
117 on 64-bit platforms and vice versa. */
118 } a_un;
119} Elf32_auxv_t;
120#endif
121
122#ifndef HAVE_ELF64_AUXV_T
123/* Copied from glibc's elf.h. */
124typedef struct
125{
126 uint64_t a_type; /* Entry type */
127 union
128 {
129 uint64_t a_val; /* Integer value */
130 /* We use to have pointer elements added here. We cannot do that,
131 though, since it does not work when using 32-bit definitions
132 on 64-bit platforms and vice versa. */
133 } a_un;
134} Elf64_auxv_t;
135#endif
136
ded48a5e
YQ
137/* Does the current host support PTRACE_GETREGSET? */
138int have_ptrace_getregset = -1;
139
8a841a35
PA
140/* Return TRUE if THREAD is the leader thread of the process. */
141
142static bool
143is_leader (thread_info *thread)
144{
145 ptid_t ptid = ptid_of (thread);
146 return ptid.pid () == ptid.lwp ();
147}
148
48989498
PA
149/* Return true if we should report thread exit events to GDB, for
150 THR. */
151
152static bool
153report_exit_events_for (thread_info *thr)
154{
155 client_state &cs = get_client_state ();
156
157 return (cs.report_thread_events
158 || (thr->thread_options & GDB_THREAD_OPTION_EXIT) != 0);
159}
160
cff068da
GB
161/* LWP accessors. */
162
163/* See nat/linux-nat.h. */
164
165ptid_t
166ptid_of_lwp (struct lwp_info *lwp)
167{
168 return ptid_of (get_lwp_thread (lwp));
169}
170
171/* See nat/linux-nat.h. */
172
4b134ca1
GB
173void
174lwp_set_arch_private_info (struct lwp_info *lwp,
175 struct arch_lwp_info *info)
176{
177 lwp->arch_private = info;
178}
179
180/* See nat/linux-nat.h. */
181
182struct arch_lwp_info *
183lwp_arch_private_info (struct lwp_info *lwp)
184{
185 return lwp->arch_private;
186}
187
188/* See nat/linux-nat.h. */
189
cff068da
GB
190int
191lwp_is_stopped (struct lwp_info *lwp)
192{
193 return lwp->stopped;
194}
195
196/* See nat/linux-nat.h. */
197
198enum target_stop_reason
199lwp_stop_reason (struct lwp_info *lwp)
200{
201 return lwp->stop_reason;
202}
203
0e00e962
AA
204/* See nat/linux-nat.h. */
205
206int
207lwp_is_stepping (struct lwp_info *lwp)
208{
209 return lwp->stepping;
210}
211
05044653
PA
212/* A list of all unknown processes which receive stop signals. Some
213 other process will presumably claim each of these as forked
214 children momentarily. */
24a09b5f 215
05044653
PA
216struct simple_pid_list
217{
218 /* The process ID. */
219 int pid;
220
221 /* The status as reported by waitpid. */
222 int status;
223
224 /* Next in chain. */
225 struct simple_pid_list *next;
226};
05c309a8 227static struct simple_pid_list *stopped_pids;
05044653
PA
228
229/* Trivial list manipulation functions to keep track of a list of new
230 stopped processes. */
231
232static void
233add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
234{
8d749320 235 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
236
237 new_pid->pid = pid;
238 new_pid->status = status;
239 new_pid->next = *listp;
240 *listp = new_pid;
241}
242
243static int
244pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
245{
246 struct simple_pid_list **p;
247
248 for (p = listp; *p != NULL; p = &(*p)->next)
249 if ((*p)->pid == pid)
250 {
251 struct simple_pid_list *next = (*p)->next;
252
253 *statusp = (*p)->status;
254 xfree (*p);
255 *p = next;
256 return 1;
257 }
258 return 0;
259}
24a09b5f 260
bde24c0a
PA
261enum stopping_threads_kind
262 {
263 /* Not stopping threads presently. */
264 NOT_STOPPING_THREADS,
265
266 /* Stopping threads. */
267 STOPPING_THREADS,
268
269 /* Stopping and suspending threads. */
270 STOPPING_AND_SUSPENDING_THREADS
271 };
272
273/* This is set while stop_all_lwps is in effect. */
6bd434d6 274static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
275
276/* FIXME make into a target method? */
24a09b5f 277int using_threads = 1;
24a09b5f 278
fa593d66
PA
279/* True if we're presently stabilizing threads (moving them out of
280 jump pads). */
281static int stabilizing_threads;
282
f50bf8e5 283static void unsuspend_all_lwps (struct lwp_info *except);
e8a625d1
PA
284static void mark_lwp_dead (struct lwp_info *lwp, int wstat,
285 bool thread_event);
00db26fa 286static int lwp_is_marked_dead (struct lwp_info *lwp);
d50171e4 287static int kill_lwp (unsigned long lwpid, int signo);
863d01bd 288static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
ece66d65 289static int linux_low_ptrace_options (int attached);
ced2dffb 290static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
d50171e4 291
582511be
PA
292/* When the event-loop is doing a step-over, this points at the thread
293 being stepped. */
6bd434d6 294static ptid_t step_over_bkpt;
582511be 295
bf9ae9d8
TBA
296bool
297linux_process_target::low_supports_breakpoints ()
298{
299 return false;
300}
d50171e4 301
bf9ae9d8
TBA
302CORE_ADDR
303linux_process_target::low_get_pc (regcache *regcache)
304{
305 return 0;
306}
307
308void
309linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
d50171e4 310{
bf9ae9d8 311 gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
d50171e4 312}
0d62e5e8 313
7582c77c
TBA
314std::vector<CORE_ADDR>
315linux_process_target::low_get_next_pcs (regcache *regcache)
316{
317 gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
318 "implemented");
319}
320
d4807ea2
TBA
321int
322linux_process_target::low_decr_pc_after_break ()
323{
324 return 0;
325}
326
c2d6af84
PA
327/* True if LWP is stopped in its stepping range. */
328
329static int
330lwp_in_step_range (struct lwp_info *lwp)
331{
332 CORE_ADDR pc = lwp->stop_pc;
333
334 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
335}
336
cdc8e9b2
JB
337/* The event pipe registered as a waitable file in the event loop. */
338static event_pipe linux_event_pipe;
bd99dc85
PA
339
340/* True if we're currently in async mode. */
cdc8e9b2 341#define target_is_async_p() (linux_event_pipe.is_open ())
bd99dc85 342
02fc4de7 343static void send_sigstop (struct lwp_info *lwp);
bd99dc85 344
d0722149
DE
345/* Return non-zero if HEADER is a 64-bit ELF file. */
346
347static int
214d508e 348elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
d0722149 349{
214d508e
L
350 if (header->e_ident[EI_MAG0] == ELFMAG0
351 && header->e_ident[EI_MAG1] == ELFMAG1
352 && header->e_ident[EI_MAG2] == ELFMAG2
353 && header->e_ident[EI_MAG3] == ELFMAG3)
354 {
355 *machine = header->e_machine;
356 return header->e_ident[EI_CLASS] == ELFCLASS64;
357
358 }
359 *machine = EM_NONE;
360 return -1;
d0722149
DE
361}
362
363/* Return non-zero if FILE is a 64-bit ELF file,
364 zero if the file is not a 64-bit ELF file,
365 and -1 if the file is not accessible or doesn't exist. */
366
be07f1a2 367static int
214d508e 368elf_64_file_p (const char *file, unsigned int *machine)
d0722149 369{
957f3f49 370 Elf64_Ehdr header;
d0722149
DE
371 int fd;
372
373 fd = open (file, O_RDONLY);
374 if (fd < 0)
375 return -1;
376
377 if (read (fd, &header, sizeof (header)) != sizeof (header))
378 {
379 close (fd);
380 return 0;
381 }
382 close (fd);
383
214d508e 384 return elf_64_header_p (&header, machine);
d0722149
DE
385}
386
be07f1a2
PA
387/* Accepts an integer PID; Returns true if the executable PID is
388 running is a 64-bit ELF file.. */
389
390int
214d508e 391linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 392{
d8d2a3ee 393 char file[PATH_MAX];
be07f1a2
PA
394
395 sprintf (file, "/proc/%d/exe", pid);
214d508e 396 return elf_64_file_p (file, machine);
be07f1a2
PA
397}
398
fd000fb3
TBA
399void
400linux_process_target::delete_lwp (lwp_info *lwp)
bd99dc85 401{
fa96cb38
PA
402 struct thread_info *thr = get_lwp_thread (lwp);
403
c058728c 404 threads_debug_printf ("deleting %ld", lwpid_of (thr));
fa96cb38
PA
405
406 remove_thread (thr);
466eecee 407
fd000fb3 408 low_delete_thread (lwp->arch_private);
466eecee 409
013e3554 410 delete lwp;
bd99dc85
PA
411}
412
fd000fb3
TBA
413void
414linux_process_target::low_delete_thread (arch_lwp_info *info)
415{
416 /* Default implementation should be overridden if architecture-specific
417 info is being used. */
418 gdb_assert (info == nullptr);
419}
95954743 420
421490af
PA
421/* Open the /proc/PID/mem file for PROC. */
422
423static void
424open_proc_mem_file (process_info *proc)
425{
426 gdb_assert (proc->priv->mem_fd == -1);
427
428 char filename[64];
429 xsnprintf (filename, sizeof filename, "/proc/%d/mem", proc->pid);
430
431 proc->priv->mem_fd
432 = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
433}
434
fd000fb3 435process_info *
421490af 436linux_process_target::add_linux_process_no_mem_file (int pid, int attached)
95954743
PA
437{
438 struct process_info *proc;
439
95954743 440 proc = add_process (pid, attached);
8d749320 441 proc->priv = XCNEW (struct process_info_private);
95954743 442
fd000fb3 443 proc->priv->arch_private = low_new_process ();
421490af
PA
444 proc->priv->mem_fd = -1;
445
446 return proc;
447}
448
aa5ca48f 449
421490af
PA
450process_info *
451linux_process_target::add_linux_process (int pid, int attached)
452{
453 process_info *proc = add_linux_process_no_mem_file (pid, attached);
454 open_proc_mem_file (proc);
95954743
PA
455 return proc;
456}
457
f551c8ef
SM
458void
459linux_process_target::remove_linux_process (process_info *proc)
460{
461 if (proc->priv->mem_fd >= 0)
462 close (proc->priv->mem_fd);
463
464 this->low_delete_process (proc->priv->arch_private);
465
466 xfree (proc->priv);
467 proc->priv = nullptr;
468
469 remove_process (proc);
470}
471
fd000fb3
TBA
472arch_process_info *
473linux_process_target::low_new_process ()
474{
475 return nullptr;
476}
477
478void
479linux_process_target::low_delete_process (arch_process_info *info)
480{
481 /* Default implementation must be overridden if architecture-specific
482 info exists. */
483 gdb_assert (info == nullptr);
484}
485
486void
487linux_process_target::low_new_fork (process_info *parent, process_info *child)
488{
489 /* Nop. */
490}
491
797bcff5
TBA
492void
493linux_process_target::arch_setup_thread (thread_info *thread)
94585166 494{
24583e45
TBA
495 scoped_restore_current_thread restore_thread;
496 switch_to_thread (thread);
94585166 497
797bcff5 498 low_arch_setup ();
94585166
DB
499}
500
d16f3f6c
TBA
501int
502linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
503 int wstat)
24a09b5f 504{
c12a5089 505 client_state &cs = get_client_state ();
94585166 506 struct lwp_info *event_lwp = *orig_event_lwp;
89a5711c 507 int event = linux_ptrace_get_extended_event (wstat);
de0d863e 508 struct thread_info *event_thr = get_lwp_thread (event_lwp);
24a09b5f 509
183be222 510 gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
65706a29 511
82075af2
JS
512 /* All extended events we currently use are mid-syscall. Only
513 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
514 you have to be using PTRACE_SEIZE to get that. */
515 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
516
c269dbdb
DB
517 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
518 || (event == PTRACE_EVENT_CLONE))
24a09b5f
DJ
519 {
520 unsigned long new_pid;
05044653 521 int ret, status;
24a09b5f 522
de0d863e 523 /* Get the pid of the new lwp. */
d86d4aaf 524 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
56f7af9c 525 &new_pid);
24a09b5f
DJ
526
527 /* If we haven't already seen the new PID stop, wait for it now. */
05044653 528 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
24a09b5f
DJ
529 {
530 /* The new child has a pending SIGSTOP. We can't affect it until it
531 hits the SIGSTOP, but we're already attached. */
532
97438e3f 533 ret = my_waitpid (new_pid, &status, __WALL);
24a09b5f
DJ
534
535 if (ret == -1)
536 perror_with_name ("waiting for new child");
537 else if (ret != new_pid)
538 warning ("wait returned unexpected PID %d", ret);
da5898ce 539 else if (!WIFSTOPPED (status))
24a09b5f
DJ
540 warning ("wait returned unexpected status 0x%x", status);
541 }
542
393a6b59 543 if (debug_threads)
de0d863e 544 {
393a6b59
PA
545 debug_printf ("HEW: Got %s event from LWP %ld, new child is %ld\n",
546 (event == PTRACE_EVENT_FORK ? "fork"
547 : event == PTRACE_EVENT_VFORK ? "vfork"
548 : event == PTRACE_EVENT_CLONE ? "clone"
549 : "???"),
550 ptid_of (event_thr).lwp (),
551 new_pid);
552 }
553
554 ptid_t child_ptid = (event != PTRACE_EVENT_CLONE
555 ? ptid_t (new_pid, new_pid)
556 : ptid_t (ptid_of (event_thr).pid (), new_pid));
de0d863e 557
393a6b59
PA
558 lwp_info *child_lwp = add_lwp (child_ptid);
559 gdb_assert (child_lwp != NULL);
560 child_lwp->stopped = 1;
561 if (event != PTRACE_EVENT_CLONE)
562 child_lwp->must_set_ptrace_flags = 1;
563 child_lwp->status_pending_p = 0;
de0d863e 564
393a6b59 565 thread_info *child_thr = get_lwp_thread (child_lwp);
de0d863e 566
393a6b59
PA
567 /* If we're suspending all threads, leave this one suspended
568 too. If the fork/clone parent is stepping over a breakpoint,
569 all other threads have been suspended already. Leave the
570 child suspended too. */
571 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
572 || event_lwp->bp_reinsert != 0)
573 {
574 threads_debug_printf ("leaving child suspended");
575 child_lwp->suspended = 1;
576 }
577
578 if (event_lwp->bp_reinsert != 0
579 && supports_software_single_step ()
580 && event == PTRACE_EVENT_VFORK)
581 {
582 /* If we leave single-step breakpoints there, child will
583 hit it, so uninsert single-step breakpoints from parent
584 (and child). Once vfork child is done, reinsert
585 them back to parent. */
586 uninsert_single_step_breakpoints (event_thr);
587 }
588
589 if (event != PTRACE_EVENT_CLONE)
590 {
de0d863e
DB
591 /* Add the new process to the tables and clone the breakpoint
592 lists of the parent. We need to do this even if the new process
593 will be detached, since we will need the process object and the
594 breakpoints to remove any breakpoints from memory when we
595 detach, and the client side will access registers. */
393a6b59 596 process_info *child_proc = add_linux_process (new_pid, 0);
de0d863e 597 gdb_assert (child_proc != NULL);
863d01bd 598
393a6b59 599 process_info *parent_proc = get_thread_process (event_thr);
de0d863e 600 child_proc->attached = parent_proc->attached;
2e7b624b 601
63c40ec7 602 clone_all_breakpoints (child_thr, event_thr);
de0d863e 603
51a948fd
AB
604 target_desc_up tdesc = allocate_target_description ();
605 copy_target_description (tdesc.get (), parent_proc->tdesc);
606 child_proc->tdesc = tdesc.release ();
de0d863e 607
3a8a0396 608 /* Clone arch-specific process data. */
fd000fb3 609 low_new_fork (parent_proc, child_proc);
393a6b59 610 }
3a8a0396 611
393a6b59
PA
612 /* Save fork/clone info in the parent thread. */
613 if (event == PTRACE_EVENT_FORK)
614 event_lwp->waitstatus.set_forked (child_ptid);
615 else if (event == PTRACE_EVENT_VFORK)
616 event_lwp->waitstatus.set_vforked (child_ptid);
617 else if (event == PTRACE_EVENT_CLONE
618 && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
619 event_lwp->waitstatus.set_thread_cloned (child_ptid);
620
621 if (event != PTRACE_EVENT_CLONE
622 || (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
623 {
de0d863e 624 /* The status_pending field contains bits denoting the
393a6b59
PA
625 extended event, so when the pending event is handled, the
626 handler will look at lwp->waitstatus. */
de0d863e
DB
627 event_lwp->status_pending_p = 1;
628 event_lwp->status_pending = wstat;
629
393a6b59
PA
630 /* Link the threads until the parent's event is passed on to
631 GDB. */
632 event_lwp->relative = child_lwp;
633 child_lwp->relative = event_lwp;
de0d863e
DB
634 }
635
393a6b59
PA
636 /* If the parent thread is doing step-over with single-step
637 breakpoints, the list of single-step breakpoints are cloned
638 from the parent's. Remove them from the child process.
639 In case of vfork, we'll reinsert them back once vforked
640 child is done. */
641 if (event_lwp->bp_reinsert != 0
642 && supports_software_single_step ())
643 {
644 /* The child process is forked and stopped, so it is safe
645 to access its memory without stopping all other threads
646 from other processes. */
647 delete_single_step_breakpoints (child_thr);
e27d73f6 648
393a6b59
PA
649 gdb_assert (has_single_step_breakpoints (event_thr));
650 gdb_assert (!has_single_step_breakpoints (child_thr));
651 }
bde24c0a 652
da5898ce
DJ
653 /* Normally we will get the pending SIGSTOP. But in some cases
654 we might get another signal delivered to the group first.
f21cc1a2 655 If we do get another signal, be sure not to lose it. */
20ba1ce6 656 if (WSTOPSIG (status) != SIGSTOP)
da5898ce 657 {
393a6b59
PA
658 child_lwp->stop_expected = 1;
659 child_lwp->status_pending_p = 1;
660 child_lwp->status_pending = status;
da5898ce 661 }
393a6b59 662 else if (event == PTRACE_EVENT_CLONE && cs.report_thread_events)
65706a29 663 {
393a6b59
PA
664 child_lwp->waitstatus.set_thread_created ();
665 child_lwp->status_pending_p = 1;
666 child_lwp->status_pending = status;
65706a29 667 }
de0d863e 668
393a6b59
PA
669 if (event == PTRACE_EVENT_CLONE)
670 {
a0aad537 671#ifdef USE_THREAD_DB
393a6b59 672 thread_db_notice_clone (event_thr, child_ptid);
a0aad537 673#endif
393a6b59 674 }
86299109 675
393a6b59
PA
676 if (event == PTRACE_EVENT_CLONE
677 && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) == 0)
678 {
679 threads_debug_printf
680 ("not reporting clone event from LWP %ld, new child is %ld\n",
681 ptid_of (event_thr).lwp (),
682 new_pid);
683 return 1;
684 }
685
686 /* Leave the child stopped until GDB processes the parent
687 event. */
688 child_thr->last_resume_kind = resume_stop;
689 child_thr->last_status.set_stopped (GDB_SIGNAL_0);
690
691 /* Report the event. */
692 threads_debug_printf
693 ("reporting %s event from LWP %ld, new child is %ld\n",
694 (event == PTRACE_EVENT_FORK ? "fork"
695 : event == PTRACE_EVENT_VFORK ? "vfork"
696 : event == PTRACE_EVENT_CLONE ? "clone"
697 : "???"),
698 ptid_of (event_thr).lwp (),
699 new_pid);
700 return 0;
24a09b5f 701 }
c269dbdb
DB
702 else if (event == PTRACE_EVENT_VFORK_DONE)
703 {
183be222 704 event_lwp->waitstatus.set_vfork_done ();
c269dbdb 705
7582c77c 706 if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
2e7b624b 707 {
3b9a79ef 708 reinsert_single_step_breakpoints (event_thr);
2e7b624b 709
3b9a79ef 710 gdb_assert (has_single_step_breakpoints (event_thr));
2e7b624b
YQ
711 }
712
c269dbdb
DB
713 /* Report the event. */
714 return 0;
715 }
c12a5089 716 else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
94585166
DB
717 {
718 struct process_info *proc;
f27866ba 719 std::vector<int> syscalls_to_catch;
94585166
DB
720 ptid_t event_ptid;
721 pid_t event_pid;
722
c058728c
SM
723 threads_debug_printf ("Got exec event from LWP %ld",
724 lwpid_of (event_thr));
94585166
DB
725
726 /* Get the event ptid. */
727 event_ptid = ptid_of (event_thr);
e99b03dc 728 event_pid = event_ptid.pid ();
94585166 729
82075af2 730 /* Save the syscall list from the execing process. */
94585166 731 proc = get_thread_process (event_thr);
f27866ba 732 syscalls_to_catch = std::move (proc->syscalls_to_catch);
82075af2
JS
733
734 /* Delete the execing process and all its threads. */
d16f3f6c 735 mourn (proc);
24583e45 736 switch_to_thread (nullptr);
94585166
DB
737
738 /* Create a new process/lwp/thread. */
fd000fb3 739 proc = add_linux_process (event_pid, 0);
94585166
DB
740 event_lwp = add_lwp (event_ptid);
741 event_thr = get_lwp_thread (event_lwp);
742 gdb_assert (current_thread == event_thr);
797bcff5 743 arch_setup_thread (event_thr);
94585166
DB
744
745 /* Set the event status. */
183be222
SM
746 event_lwp->waitstatus.set_execd
747 (make_unique_xstrdup
748 (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));
94585166
DB
749
750 /* Mark the exec status as pending. */
751 event_lwp->stopped = 1;
752 event_lwp->status_pending_p = 1;
753 event_lwp->status_pending = wstat;
754 event_thr->last_resume_kind = resume_continue;
183be222 755 event_thr->last_status.set_ignore ();
94585166 756
82075af2
JS
757 /* Update syscall state in the new lwp, effectively mid-syscall too. */
758 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
759
760 /* Restore the list to catch. Don't rely on the client, which is free
761 to avoid sending a new list when the architecture doesn't change.
762 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
f27866ba 763 proc->syscalls_to_catch = std::move (syscalls_to_catch);
82075af2 764
94585166
DB
765 /* Report the event. */
766 *orig_event_lwp = event_lwp;
767 return 0;
768 }
de0d863e 769
f34652de 770 internal_error (_("unknown ptrace event %d"), event);
24a09b5f
DJ
771}
772
df95181f
TBA
773CORE_ADDR
774linux_process_target::get_pc (lwp_info *lwp)
d50171e4 775{
a9deee17
PA
776 process_info *proc = get_thread_process (get_lwp_thread (lwp));
777 gdb_assert (!proc->starting_up);
d50171e4 778
bf9ae9d8 779 if (!low_supports_breakpoints ())
d50171e4
PA
780 return 0;
781
24583e45
TBA
782 scoped_restore_current_thread restore_thread;
783 switch_to_thread (get_lwp_thread (lwp));
d50171e4 784
a9deee17
PA
785 struct regcache *regcache = get_thread_regcache (current_thread, 1);
786 CORE_ADDR pc = low_get_pc (regcache);
d50171e4 787
c058728c 788 threads_debug_printf ("pc is 0x%lx", (long) pc);
d50171e4 789
d50171e4
PA
790 return pc;
791}
792
9eedd27d
TBA
793void
794linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
82075af2 795{
82075af2
JS
796 struct regcache *regcache;
797
24583e45
TBA
798 scoped_restore_current_thread restore_thread;
799 switch_to_thread (get_lwp_thread (lwp));
82075af2
JS
800
801 regcache = get_thread_regcache (current_thread, 1);
9eedd27d 802 low_get_syscall_trapinfo (regcache, sysno);
82075af2 803
c058728c 804 threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
82075af2
JS
805}
806
9eedd27d
TBA
807void
808linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
809{
810 /* By default, report an unknown system call number. */
811 *sysno = UNKNOWN_SYSCALL;
812}
813
df95181f
TBA
814bool
815linux_process_target::save_stop_reason (lwp_info *lwp)
0d62e5e8 816{
582511be
PA
817 CORE_ADDR pc;
818 CORE_ADDR sw_breakpoint_pc;
3e572f71
PA
819#if USE_SIGTRAP_SIGINFO
820 siginfo_t siginfo;
821#endif
d50171e4 822
bf9ae9d8 823 if (!low_supports_breakpoints ())
df95181f 824 return false;
0d62e5e8 825
a9deee17
PA
826 process_info *proc = get_thread_process (get_lwp_thread (lwp));
827 if (proc->starting_up)
828 {
829 /* Claim we have the stop PC so that the caller doesn't try to
830 fetch it itself. */
831 return true;
832 }
833
582511be 834 pc = get_pc (lwp);
d4807ea2 835 sw_breakpoint_pc = pc - low_decr_pc_after_break ();
d50171e4 836
582511be 837 /* breakpoint_at reads from the current thread. */
24583e45
TBA
838 scoped_restore_current_thread restore_thread;
839 switch_to_thread (get_lwp_thread (lwp));
47c0c975 840
3e572f71
PA
841#if USE_SIGTRAP_SIGINFO
842 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
843 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
844 {
845 if (siginfo.si_signo == SIGTRAP)
846 {
e7ad2f14
PA
847 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
848 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
3e572f71 849 {
e7ad2f14
PA
850 /* The si_code is ambiguous on this arch -- check debug
851 registers. */
852 if (!check_stopped_by_watchpoint (lwp))
853 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
854 }
855 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
856 {
857 /* If we determine the LWP stopped for a SW breakpoint,
858 trust it. Particularly don't check watchpoint
859 registers, because at least on s390, we'd find
860 stopped-by-watchpoint as long as there's a watchpoint
861 set. */
3e572f71 862 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
3e572f71 863 }
e7ad2f14 864 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
3e572f71 865 {
e7ad2f14
PA
866 /* This can indicate either a hardware breakpoint or
867 hardware watchpoint. Check debug registers. */
868 if (!check_stopped_by_watchpoint (lwp))
869 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
3e572f71 870 }
2bf6fb9d
PA
871 else if (siginfo.si_code == TRAP_TRACE)
872 {
e7ad2f14
PA
873 /* We may have single stepped an instruction that
874 triggered a watchpoint. In that case, on some
875 architectures (such as x86), instead of TRAP_HWBKPT,
876 si_code indicates TRAP_TRACE, and we need to check
877 the debug registers separately. */
878 if (!check_stopped_by_watchpoint (lwp))
879 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
2bf6fb9d 880 }
3e572f71
PA
881 }
882 }
883#else
582511be
PA
884 /* We may have just stepped a breakpoint instruction. E.g., in
885 non-stop mode, GDB first tells the thread A to step a range, and
886 then the user inserts a breakpoint inside the range. In that
8090aef2
PA
887 case we need to report the breakpoint PC. */
888 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
d7146cda 889 && low_breakpoint_at (sw_breakpoint_pc))
e7ad2f14
PA
890 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
891
892 if (hardware_breakpoint_inserted_here (pc))
893 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
894
895 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
896 check_stopped_by_watchpoint (lwp);
897#endif
898
899 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
582511be 900 {
c058728c
SM
901 threads_debug_printf
902 ("%s stopped by software breakpoint",
903 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
582511be
PA
904
905 /* Back up the PC if necessary. */
906 if (pc != sw_breakpoint_pc)
e7ad2f14 907 {
582511be
PA
908 struct regcache *regcache
909 = get_thread_regcache (current_thread, 1);
bf9ae9d8 910 low_set_pc (regcache, sw_breakpoint_pc);
582511be
PA
911 }
912
e7ad2f14
PA
913 /* Update this so we record the correct stop PC below. */
914 pc = sw_breakpoint_pc;
582511be 915 }
e7ad2f14 916 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
c058728c
SM
917 threads_debug_printf
918 ("%s stopped by hardware breakpoint",
919 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
e7ad2f14 920 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
c058728c
SM
921 threads_debug_printf
922 ("%s stopped by hardware watchpoint",
923 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
e7ad2f14 924 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
c058728c
SM
925 threads_debug_printf
926 ("%s stopped by trace",
927 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
e7ad2f14
PA
928
929 lwp->stop_pc = pc;
df95181f 930 return true;
0d62e5e8 931}
ce3a066d 932
fd000fb3
TBA
933lwp_info *
934linux_process_target::add_lwp (ptid_t ptid)
611cb4a5 935{
c360a473 936 lwp_info *lwp = new lwp_info;
0d62e5e8 937
754e3168
AH
938 lwp->thread = add_thread (ptid, lwp);
939
fd000fb3 940 low_new_thread (lwp);
aa5ca48f 941
54a0b537 942 return lwp;
0d62e5e8 943}
611cb4a5 944
fd000fb3
TBA
945void
946linux_process_target::low_new_thread (lwp_info *info)
947{
948 /* Nop. */
949}
950
/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.

   NOTE: this runs in the forked child, between fork and exec, so only
   async-signal-safe operations and error paths that end in _exit are
   appropriate here.  */

static void
linux_ptrace_fun ()
{
  /* Request tracing first, so the subsequent exec stops the child and
     hands control to gdbserver.  */
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  /* Put the inferior in its own process group, so terminal signals
     (e.g. Ctrl-C) don't hit gdbserver itself.  */
  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      /* open reuses the lowest free descriptor, i.e. fd 0 (stdin).  */
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
982
/* Start an inferior process and returns its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    /* Scope: personality is restored when this object goes out of
       scope, right after fork_inferior returns.  */
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  /* When spawning a new process, we can't open the mem file yet.  We
     still have to nurse the process through the shell, and that execs
     a couple times.  The address space a /proc/PID/mem file is
     accessing is destroyed on exec.  */
  process_info *proc = add_linux_process_no_mem_file (pid, 0);

  ptid = ptid_t (pid, pid);
  new_lwp = add_lwp (ptid);
  /* Defer enabling ptrace event reporting to post_create_inferior,
     once the first stop has been collected.  */
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  /* PROC is now past the shell running the program we want, so we can
     open the /proc/PID/mem file.  */
  open_proc_mem_file (proc);

  return pid;
}
1025
/* Implement the post_create_inferior target_ops method.

   Runs after the inferior's first stop: detect the architecture and
   enable the ptrace event options that were deferred at creation
   time.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  /* Determine register set/tdesc for the now-execed inferior.  */
  low_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}
1044
/* Attach to the LWP specified by PTID.  Returns 0 on success, or an
   errno value from PTRACE_ATTACH on failure.  */

int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      threads_debug_printf ("Attached to a stopped process");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
1125
/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      threads_debug_printf ("Found new lwp %d", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	threads_debug_printf
	  ("Cannot attach to lwp %d: thread is gone (%d: %s)",
	   lwpid, err, safe_strerror (err));
      else if (err != 0)
	{
	  /* Any other failure is fatal for the attach as a whole.  */
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  error (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      /* Report "new LWP seen" so the /proc scan restarts; new threads
	 may have been spawned meanwhile.  */
      return 1;
    }
  return 0;
}
1164
500c1d85
PA
1165static void async_file_mark (void);
1166
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  Returns 0 on success; throws on failure.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid);
  int err;

  /* Delay opening the /proc/PID/mem file until we've successfully
     attached.  */
  proc = add_linux_process_no_mem_file (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      /* Undo the process registration done above, then report.  */
      this->remove_linux_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  open_proc_mem_file (proc);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid));
  gdb_assert (initial_thread != nullptr);
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  try
    {
      linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
    }
  catch (const gdb_exception_error &)
    {
      /* Make sure we do not deliver the SIGSTOP to the process.  */
      initial_thread->last_resume_kind = resume_continue;

      this->detach (proc);
      throw;
    }

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));
      gdb_assert (lwp != nullptr);

      /* Anything other than the expected initial SIGSTOP is left
	 pending, to be reported to GDB later.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
1256
95954743 1257static int
e4eb0dec 1258last_thread_of_process_p (int pid)
95954743 1259{
e4eb0dec 1260 bool seen_one = false;
95954743 1261
da4ae14a 1262 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
95954743 1263 {
e4eb0dec
SM
1264 if (!seen_one)
1265 {
1266 /* This is the first thread of this process we see. */
1267 seen_one = true;
1268 return false;
1269 }
1270 else
1271 {
1272 /* This is the second thread of this process we see. */
1273 return true;
1274 }
1275 });
da6d8c04 1276
e4eb0dec 1277 return thread == NULL;
95954743
PA
1278}
1279
/* Kill LWP.  Sends both SIGKILL and PTRACE_KILL; see below for why
   both are used.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      /* Save errno before the debug call can clobber it.  */
      int save_errno = errno;

      threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }
}
1323
/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      /* ECHILD here means LWPID is a clone child; retry with
	 __WCLONE to reap it.  */
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1365
578290ec 1366/* Callback for `for_each_thread'. Kills an lwp of a given process,
da84f473 1367 except the leader. */
95954743 1368
578290ec
SM
1369static void
1370kill_one_lwp_callback (thread_info *thread, int pid)
da6d8c04 1371{
54a0b537 1372 struct lwp_info *lwp = get_thread_lwp (thread);
0d62e5e8 1373
fd500816
DJ
1374 /* We avoid killing the first thread here, because of a Linux kernel (at
1375 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1376 the children get a chance to be reaped, it will remain a zombie
1377 forever. */
95954743 1378
d86d4aaf 1379 if (lwpid_of (thread) == pid)
95954743 1380 {
c058728c
SM
1381 threads_debug_printf ("is last of process %s",
1382 target_pid_to_str (thread->id).c_str ());
578290ec 1383 return;
95954743 1384 }
fd500816 1385
e76126e8 1386 kill_wait_lwp (lwp);
da6d8c04
DJ
1387}
1388
/* Kill every LWP of PROCESS and mourn it.  Always returns 0.  */

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  /* Kill all non-leader LWPs first; see kill_one_lwp_callback for why
     the leader must be last.  */
  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    threads_debug_printf ("cannot find lwp for pid: %d", pid);
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1419
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   0 when there is no signal that should be delivered.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
	  || thread->last_status.sig () == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      threads_debug_printf ("lwp %s had stopped with extended "
			    "status: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  /* Honor GDB's QPassSignals/QProgramSignals settings: don't deliver
     a signal the user asked to have suppressed.  */
  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      threads_debug_printf ("lwp %s had signal %s, "
			    "but we don't know if we should pass it. "
			    "Default to not.",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      threads_debug_printf ("lwp %s has pending signal %s: delivering it",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));

      /* Return the host signal number, suitable for PTRACE_DETACH.  */
      return WSTOPSIG (status);
    }
}
1495
/* Detach from LWP: drop any pending SIGSTOP, re-deliver any signal it
   stopped for, flush its registers, and PTRACE_DETACH it.  Handles
   the LWP having become a zombie behind our back.  */

void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      threads_debug_printf ("Sending SIGCONT to %s",
			    target_pid_to_str (ptid_of (thread)).c_str ());

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)).c_str (),
		 safe_strerror (save_errno));
	}
    }
  else
    threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
			  target_pid_to_str (ptid_of (thread)).c_str (),
			  strsignal (sig));

  delete_lwp (lwp);
}
1574
/* Detach from PROCESS and all of its LWPs.  Always returns 0.  */

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  gdb_assert (main_lwp != nullptr);
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1624
/* Clean up after PROCESS has exited or been detached: remove all LWPs
   that belong to it from the lwp list, tear down thread_db state, and
   unregister the process itself.  */

void
linux_process_target::mourn (process_info *process)
{
#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  this->remove_linux_process (process);
}
1641
95a49a39
TBA
1642void
1643linux_process_target::join (int pid)
444d6139 1644{
444d6139
PA
1645 int status, ret;
1646
1647 do {
d105de22 1648 ret = my_waitpid (pid, &status, 0);
444d6139
PA
1649 if (WIFEXITED (status) || WIFSIGNALED (status))
1650 break;
1651 } while (ret != -1 || errno != ECHILD);
1652}
1653
13d3d99b
TBA
1654/* Return true if the given thread is still alive. */
1655
1656bool
1657linux_process_target::thread_alive (ptid_t ptid)
0d62e5e8 1658{
95954743
PA
1659 struct lwp_info *lwp = find_lwp_pid (ptid);
1660
1661 /* We assume we always know if a thread exits. If a whole process
1662 exited but we still haven't been able to report it to GDB, we'll
1663 hold on to the last lwp of the dead process. */
1664 if (lwp != NULL)
00db26fa 1665 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1666 else
1667 return 0;
1668}
1669
/* Re-validate THREAD's pending stop status.  A stop previously
   recorded for a breakpoint may have become stale (e.g., GDB removed
   the breakpoint, or the thread was moved); in that case discard it
   and report no pending status.  Returns whether a (still valid)
   status is pending.  */

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      scoped_restore_current_thread restore_thread;
      switch_to_thread (thread);

      /* If the PC moved since the stop was recorded, the stop no
	 longer corresponds to this location.  */
      if (pc != lp->stop_pc)
	{
	  threads_debug_printf ("PC of %ld changed",
				lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      /* Without siginfo-based classification, also double-check the
	 breakpoint is still inserted at the stop address.  */
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !low_breakpoint_at (pc))
	{
	  threads_debug_printf ("previous SW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  threads_debug_printf ("previous HW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
#endif

      if (discard)
	{
	  threads_debug_printf ("discarding pending breakpoint status");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1726
a681f9c9
PA
1727/* Returns true if LWP is resumed from the client's perspective. */
1728
1729static int
1730lwp_resumed (struct lwp_info *lwp)
1731{
1732 struct thread_info *thread = get_lwp_thread (lwp);
1733
1734 if (thread->last_resume_kind != resume_stop)
1735 return 1;
1736
1737 /* Did gdb send us a `vCont;t', but we haven't reported the
1738 corresponding stop to gdb yet? If so, the thread is still
1739 resumed/running from gdb's perspective. */
1740 if (thread->last_resume_kind == resume_stop
183be222 1741 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
a681f9c9
PA
1742 return 1;
1743
1744 return 0;
1745}
1746
/* Predicate used when scanning for a thread with a reportable event:
   returns whether THREAD (if it matches PTID and is client-resumed)
   has a valid pending status.  As a side effect, a stale breakpoint
   stop is discarded and the thread is re-resumed.  */

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
						 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      /* The recorded stop went stale; resume the thread the same way
	 it was running before (stepping or continuing).  */
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
1770
95954743
PA
1771struct lwp_info *
1772find_lwp_pid (ptid_t ptid)
1773{
d4895ba2
SM
1774 long lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1775 thread_info *thread = find_thread ([lwp] (thread_info *thr_arg)
454296a2 1776 {
da4ae14a 1777 return thr_arg->id.lwp () == lwp;
454296a2 1778 });
d86d4aaf
DE
1779
1780 if (thread == NULL)
1781 return NULL;
1782
9c80ecd6 1783 return get_thread_lwp (thread);
95954743
PA
1784}
1785
fa96cb38 1786/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1787
fa96cb38
PA
1788static int
1789num_lwps (int pid)
1790{
fa96cb38 1791 int count = 0;
0d62e5e8 1792
4d3bb80e
SM
1793 for_each_thread (pid, [&] (thread_info *thread)
1794 {
9c80ecd6 1795 count++;
4d3bb80e 1796 });
3aee8918 1797
fa96cb38
PA
1798 return count;
1799}
d61ddec4 1800
6d4ee8c6
GB
1801/* See nat/linux-nat.h. */
1802
1803struct lwp_info *
1804iterate_over_lwps (ptid_t filter,
d3a70e03 1805 gdb::function_view<iterate_over_lwps_ftype> callback)
6d4ee8c6 1806{
da4ae14a 1807 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
6d1e5673 1808 {
da4ae14a 1809 lwp_info *lwp = get_thread_lwp (thr_arg);
6d1e5673 1810
d3a70e03 1811 return callback (lwp);
6d1e5673 1812 });
6d4ee8c6 1813
9c80ecd6 1814 if (thread == NULL)
6d4ee8c6
GB
1815 return NULL;
1816
9c80ecd6 1817 return get_thread_lwp (thread);
6d4ee8c6
GB
1818}
1819
/* Scan all processes for thread-group leaders that have become
   zombies while non-leader threads still exist, and delete (or mark
   dead) those leaders.  Returns true if a new pending (exit) event
   was queued as a result.  */

bool
linux_process_target::check_zombie_leaders ()
{
  bool new_pending_event = false;

  for_each_process ([&] (process_info *proc)
    {
      pid_t leader_pid = pid_of (proc);
      lwp_info *leader_lp = find_lwp_pid (ptid_t (leader_pid));

      threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
			    "num_lwps=%d, zombie=%d",
			    leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
			    linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL && !leader_lp->stopped
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  Note this
	     isn't a watertight check.  If the inferior is
	     multi-threaded and is exiting, it may be we see the
	     leader as zombie before we reap all the non-leader
	     threads.  See comments below.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A zombie leader in a multi-threaded program can mean one
	     of three things:

	     #1 - Only the leader exited, not the whole program, e.g.,
	     with pthread_exit.  Since we can't reap the leader's exit
	     status until all other threads are gone and reaped too,
	     we want to delete the zombie leader right away, as it
	     can't be debugged, we can't read its registers, etc.
	     This is the main reason we check for zombie leaders
	     disappearing.

	     #2 - The whole thread-group/process exited (a group exit,
	     via e.g. exit(3), and there is (or will be shortly) an
	     exit reported for each thread in the process, and then
	     finally an exit for the leader once the non-leaders are
	     reaped.

	     #3 - There are 3 or more threads in the group, and a
	     thread other than the leader exec'd.  See comments on
	     exec events at the top of the file.

	     Ideally we would never delete the leader for case #2.
	     Instead, we want to collect the exit status of each
	     non-leader thread, and then finally collect the exit
	     status of the leader as normal and use its exit code as
	     whole-process exit code.  Unfortunately, there's no
	     race-free way to distinguish cases #1 and #2.  We can't
	     assume the exit events for the non-leaders threads are
	     already pending in the kernel, nor can we assume the
	     non-leader threads are in zombie state already.  Between
	     the leader becoming zombie and the non-leaders exiting
	     and becoming zombie themselves, there's a small time
	     window, so such a check would be racy.  Temporarily
	     pausing all threads and checking to see if all threads
	     exit or not before re-resuming them would work in the
	     case that all threads are running right now, but it
	     wouldn't work if some thread is currently already
	     ptrace-stopped, e.g., due to scheduler-locking.

	     So what we do is we delete the leader anyhow, and then
	     later on when we see its exit status, we re-add it back.
	     We also make sure that we only report a whole-process
	     exit when we see the leader exiting, as opposed to when
	     the last LWP in the LWP list exits, which can be a
	     non-leader if we deleted the leader here.  */
	  threads_debug_printf ("Thread group leader %d zombie "
				"(it exited, or another thread execd), "
				"deleting it.",
				leader_pid);

	  thread_info *leader_thread = get_lwp_thread (leader_lp);
	  if (report_exit_events_for (leader_thread))
	    {
	      /* GDB asked for exit events: synthesize one now rather
		 than silently dropping the leader.  */
	      mark_lwp_dead (leader_lp, W_EXITCODE (0, 0), true);
	      new_pending_event = true;
	    }
	  else
	    delete_lwp (leader_lp);
	}
    });

  return new_pending_event;
}
c3adc08c 1908
a1385b7b
SM
1909/* Callback for `find_thread'. Returns the first LWP that is not
1910 stopped. */
d50171e4 1911
a1385b7b
SM
1912static bool
1913not_stopped_callback (thread_info *thread, ptid_t filter)
fa96cb38 1914{
a1385b7b
SM
1915 if (!thread->id.matches (filter))
1916 return false;
47c0c975 1917
a1385b7b 1918 lwp_info *lwp = get_thread_lwp (thread);
fa96cb38 1919
a1385b7b 1920 return !lwp->stopped;
0d62e5e8 1921}
611cb4a5 1922
863d01bd
PA
1923/* Increment LWP's suspend count. */
1924
1925static void
1926lwp_suspended_inc (struct lwp_info *lwp)
1927{
1928 lwp->suspended++;
1929
c058728c
SM
1930 if (lwp->suspended > 4)
1931 threads_debug_printf
1932 ("LWP %ld has a suspiciously high suspend count, suspended=%d",
1933 lwpid_of (get_lwp_thread (lwp)), lwp->suspended);
863d01bd
PA
1934}
1935
1936/* Decrement LWP's suspend count. */
1937
1938static void
1939lwp_suspended_decr (struct lwp_info *lwp)
1940{
1941 lwp->suspended--;
1942
1943 if (lwp->suspended < 0)
1944 {
1945 struct thread_info *thread = get_lwp_thread (lwp);
1946
f34652de 1947 internal_error ("unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
863d01bd
PA
1948 lwp->suspended);
1949 }
1950}
1951
219f2f23
PA
1952/* This function should only be called if the LWP got a SIGTRAP.
1953
1954 Handle any tracepoint steps or hits. Return true if a tracepoint
1955 event was handled, 0 otherwise. */
1956
1957static int
1958handle_tracepoints (struct lwp_info *lwp)
1959{
1960 struct thread_info *tinfo = get_lwp_thread (lwp);
1961 int tpoint_related_event = 0;
1962
582511be
PA
1963 gdb_assert (lwp->suspended == 0);
1964
7984d532
PA
1965 /* If this tracepoint hit causes a tracing stop, we'll immediately
1966 uninsert tracepoints. To do this, we temporarily pause all
1967 threads, unpatch away, and then unpause threads. We need to make
1968 sure the unpausing doesn't resume LWP too. */
863d01bd 1969 lwp_suspended_inc (lwp);
7984d532 1970
219f2f23
PA
1971 /* And we need to be sure that any all-threads-stopping doesn't try
1972 to move threads out of the jump pads, as it could deadlock the
1973 inferior (LWP could be in the jump pad, maybe even holding the
1974 lock.) */
1975
1976 /* Do any necessary step collect actions. */
1977 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1978
fa593d66
PA
1979 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1980
219f2f23
PA
1981 /* See if we just hit a tracepoint and do its main collect
1982 actions. */
1983 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1984
863d01bd 1985 lwp_suspended_decr (lwp);
7984d532
PA
1986
1987 gdb_assert (lwp->suspended == 0);
229d26fc
SM
1988 gdb_assert (!stabilizing_threads
1989 || (lwp->collecting_fast_tracepoint
1990 != fast_tpoint_collect_result::not_collecting));
7984d532 1991
219f2f23
PA
1992 if (tpoint_related_event)
1993 {
c058728c 1994 threads_debug_printf ("got a tracepoint event");
219f2f23
PA
1995 return 1;
1996 }
1997
1998 return 0;
1999}
2000
13e567af
TBA
2001fast_tpoint_collect_result
2002linux_process_target::linux_fast_tracepoint_collecting
2003 (lwp_info *lwp, fast_tpoint_collect_status *status)
fa593d66
PA
2004{
2005 CORE_ADDR thread_area;
d86d4aaf 2006 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66 2007
fa593d66
PA
2008 /* Get the thread area address. This is used to recognize which
2009 thread is which when tracing with the in-process agent library.
2010 We don't read anything from the address, and treat it as opaque;
2011 it's the address itself that we assume is unique per-thread. */
13e567af 2012 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
229d26fc 2013 return fast_tpoint_collect_result::not_collecting;
fa593d66
PA
2014
2015 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2016}
2017
13e567af
TBA
2018int
2019linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
2020{
2021 return -1;
2022}
2023
d16f3f6c
TBA
2024bool
2025linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
fa593d66 2026{
24583e45
TBA
2027 scoped_restore_current_thread restore_thread;
2028 switch_to_thread (get_lwp_thread (lwp));
fa593d66
PA
2029
2030 if ((wstat == NULL
2031 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2032 && supports_fast_tracepoints ()
58b4daa5 2033 && agent_loaded_p ())
fa593d66
PA
2034 {
2035 struct fast_tpoint_collect_status status;
fa593d66 2036
c058728c
SM
2037 threads_debug_printf
2038 ("Checking whether LWP %ld needs to move out of the jump pad.",
2039 lwpid_of (current_thread));
fa593d66 2040
229d26fc
SM
2041 fast_tpoint_collect_result r
2042 = linux_fast_tracepoint_collecting (lwp, &status);
fa593d66
PA
2043
2044 if (wstat == NULL
2045 || (WSTOPSIG (*wstat) != SIGILL
2046 && WSTOPSIG (*wstat) != SIGFPE
2047 && WSTOPSIG (*wstat) != SIGSEGV
2048 && WSTOPSIG (*wstat) != SIGBUS))
2049 {
2050 lwp->collecting_fast_tracepoint = r;
2051
229d26fc 2052 if (r != fast_tpoint_collect_result::not_collecting)
fa593d66 2053 {
229d26fc
SM
2054 if (r == fast_tpoint_collect_result::before_insn
2055 && lwp->exit_jump_pad_bkpt == NULL)
fa593d66
PA
2056 {
2057 /* Haven't executed the original instruction yet.
2058 Set breakpoint there, and wait till it's hit,
2059 then single-step until exiting the jump pad. */
2060 lwp->exit_jump_pad_bkpt
2061 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2062 }
2063
c058728c
SM
2064 threads_debug_printf
2065 ("Checking whether LWP %ld needs to move out of the jump pad..."
2066 " it does", lwpid_of (current_thread));
fa593d66 2067
d16f3f6c 2068 return true;
fa593d66
PA
2069 }
2070 }
2071 else
2072 {
2073 /* If we get a synchronous signal while collecting, *and*
2074 while executing the (relocated) original instruction,
2075 reset the PC to point at the tpoint address, before
2076 reporting to GDB. Otherwise, it's an IPA lib bug: just
2077 report the signal to GDB, and pray for the best. */
2078
229d26fc
SM
2079 lwp->collecting_fast_tracepoint
2080 = fast_tpoint_collect_result::not_collecting;
fa593d66 2081
229d26fc 2082 if (r != fast_tpoint_collect_result::not_collecting
fa593d66
PA
2083 && (status.adjusted_insn_addr <= lwp->stop_pc
2084 && lwp->stop_pc < status.adjusted_insn_addr_end))
2085 {
2086 siginfo_t info;
2087 struct regcache *regcache;
2088
2089 /* The si_addr on a few signals references the address
2090 of the faulting instruction. Adjust that as
2091 well. */
2092 if ((WSTOPSIG (*wstat) == SIGILL
2093 || WSTOPSIG (*wstat) == SIGFPE
2094 || WSTOPSIG (*wstat) == SIGBUS
2095 || WSTOPSIG (*wstat) == SIGSEGV)
0bfdf32f 2096 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 2097 (PTRACE_TYPE_ARG3) 0, &info) == 0
fa593d66
PA
2098 /* Final check just to make sure we don't clobber
2099 the siginfo of non-kernel-sent signals. */
2100 && (uintptr_t) info.si_addr == lwp->stop_pc)
2101 {
2102 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
0bfdf32f 2103 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
b8e1b30e 2104 (PTRACE_TYPE_ARG3) 0, &info);
fa593d66
PA
2105 }
2106
0bfdf32f 2107 regcache = get_thread_regcache (current_thread, 1);
bf9ae9d8 2108 low_set_pc (regcache, status.tpoint_addr);
fa593d66
PA
2109 lwp->stop_pc = status.tpoint_addr;
2110
2111 /* Cancel any fast tracepoint lock this thread was
2112 holding. */
2113 force_unlock_trace_buffer ();
2114 }
2115
2116 if (lwp->exit_jump_pad_bkpt != NULL)
2117 {
c058728c
SM
2118 threads_debug_printf
2119 ("Cancelling fast exit-jump-pad: removing bkpt."
2120 "stopping all threads momentarily.");
fa593d66
PA
2121
2122 stop_all_lwps (1, lwp);
fa593d66
PA
2123
2124 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2125 lwp->exit_jump_pad_bkpt = NULL;
2126
2127 unstop_all_lwps (1, lwp);
2128
2129 gdb_assert (lwp->suspended >= 0);
2130 }
2131 }
2132 }
2133
c058728c
SM
2134 threads_debug_printf
2135 ("Checking whether LWP %ld needs to move out of the jump pad... no",
2136 lwpid_of (current_thread));
0cccb683 2137
d16f3f6c 2138 return false;
fa593d66
PA
2139}
2140
2141/* Enqueue one signal in the "signals to report later when out of the
2142 jump pad" list. */
2143
2144static void
2145enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2146{
d86d4aaf 2147 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66 2148
c058728c
SM
2149 threads_debug_printf ("Deferring signal %d for LWP %ld.",
2150 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2151
2152 if (debug_threads)
2153 {
013e3554 2154 for (const auto &sig : lwp->pending_signals_to_report)
c058728c 2155 threads_debug_printf (" Already queued %d", sig.signal);
fa593d66 2156
c058728c 2157 threads_debug_printf (" (no more currently queued signals)");
fa593d66
PA
2158 }
2159
1a981360
PA
2160 /* Don't enqueue non-RT signals if they are already in the deferred
2161 queue. (SIGSTOP being the easiest signal to see ending up here
2162 twice) */
2163 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2164 {
013e3554 2165 for (const auto &sig : lwp->pending_signals_to_report)
1a981360 2166 {
013e3554 2167 if (sig.signal == WSTOPSIG (*wstat))
1a981360 2168 {
c058728c
SM
2169 threads_debug_printf
2170 ("Not requeuing already queued non-RT signal %d for LWP %ld",
2171 sig.signal, lwpid_of (thread));
1a981360
PA
2172 return;
2173 }
2174 }
2175 }
2176
013e3554 2177 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
8d749320 2178
d86d4aaf 2179 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
013e3554 2180 &lwp->pending_signals_to_report.back ().info);
fa593d66
PA
2181}
2182
2183/* Dequeue one signal from the "signals to report later when out of
2184 the jump pad" list. */
2185
2186static int
2187dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2188{
d86d4aaf
DE
2189 struct thread_info *thread = get_lwp_thread (lwp);
2190
013e3554 2191 if (!lwp->pending_signals_to_report.empty ())
fa593d66 2192 {
013e3554 2193 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
fa593d66 2194
013e3554
TBA
2195 *wstat = W_STOPCODE (p_sig.signal);
2196 if (p_sig.info.si_signo != 0)
d86d4aaf 2197 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
013e3554
TBA
2198 &p_sig.info);
2199
2200 lwp->pending_signals_to_report.pop_front ();
fa593d66 2201
c058728c
SM
2202 threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
2203 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2204
2205 if (debug_threads)
2206 {
013e3554 2207 for (const auto &sig : lwp->pending_signals_to_report)
c058728c 2208 threads_debug_printf (" Still queued %d", sig.signal);
fa593d66 2209
c058728c 2210 threads_debug_printf (" (no more queued signals)");
fa593d66
PA
2211 }
2212
2213 return 1;
2214 }
2215
2216 return 0;
2217}
2218
ac1bbaca
TBA
2219bool
2220linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
582511be 2221{
24583e45
TBA
2222 scoped_restore_current_thread restore_thread;
2223 switch_to_thread (get_lwp_thread (child));
d50171e4 2224
ac1bbaca
TBA
2225 if (low_stopped_by_watchpoint ())
2226 {
2227 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2228 child->stopped_data_address = low_stopped_data_address ();
2229 }
582511be 2230
ac1bbaca
TBA
2231 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2232}
d50171e4 2233
ac1bbaca
TBA
2234bool
2235linux_process_target::low_stopped_by_watchpoint ()
2236{
2237 return false;
2238}
d50171e4 2239
ac1bbaca
TBA
2240CORE_ADDR
2241linux_process_target::low_stopped_data_address ()
2242{
2243 return 0;
c4d9ceb6
YQ
2244}
2245
de0d863e
DB
2246/* Return the ptrace options that we want to try to enable. */
2247
2248static int
2249linux_low_ptrace_options (int attached)
2250{
c12a5089 2251 client_state &cs = get_client_state ();
de0d863e
DB
2252 int options = 0;
2253
2254 if (!attached)
2255 options |= PTRACE_O_EXITKILL;
2256
c12a5089 2257 if (cs.report_fork_events)
de0d863e
DB
2258 options |= PTRACE_O_TRACEFORK;
2259
c12a5089 2260 if (cs.report_vfork_events)
c269dbdb
DB
2261 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2262
c12a5089 2263 if (cs.report_exec_events)
94585166
DB
2264 options |= PTRACE_O_TRACEEXEC;
2265
82075af2
JS
2266 options |= PTRACE_O_TRACESYSGOOD;
2267
de0d863e
DB
2268 return options;
2269}
2270
1a48f002 2271void
d16f3f6c 2272linux_process_target::filter_event (int lwpid, int wstat)
fa96cb38
PA
2273{
2274 struct lwp_info *child;
2275 struct thread_info *thread;
582511be 2276 int have_stop_pc = 0;
fa96cb38 2277
f2907e49 2278 child = find_lwp_pid (ptid_t (lwpid));
fa96cb38 2279
5406bc3f
PA
2280 /* Check for events reported by anything not in our LWP list. */
2281 if (child == nullptr)
94585166 2282 {
5406bc3f
PA
2283 if (WIFSTOPPED (wstat))
2284 {
2285 if (WSTOPSIG (wstat) == SIGTRAP
2286 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2287 {
2288 /* A non-leader thread exec'ed after we've seen the
2289 leader zombie, and removed it from our lists (in
2290 check_zombie_leaders). The non-leader thread changes
2291 its tid to the tgid. */
2292 threads_debug_printf
2293 ("Re-adding thread group leader LWP %d after exec.",
2294 lwpid);
94585166 2295
5406bc3f
PA
2296 child = add_lwp (ptid_t (lwpid, lwpid));
2297 child->stopped = 1;
2298 switch_to_thread (child->thread);
2299 }
2300 else
2301 {
2302 /* A process we are controlling has forked and the new
2303 child's stop was reported to us by the kernel. Save
2304 its PID and go back to waiting for the fork event to
2305 be reported - the stopped process might be returned
2306 from waitpid before or after the fork event is. */
2307 threads_debug_printf
2308 ("Saving LWP %d status %s in stopped_pids list",
2309 lwpid, status_to_str (wstat).c_str ());
2310 add_to_pid_list (&stopped_pids, lwpid, wstat);
2311 }
2312 }
2313 else
2314 {
2315 /* Don't report an event for the exit of an LWP not in our
2316 list, i.e. not part of any inferior we're debugging.
2317 This can happen if we detach from a program we originally
8a841a35
PA
2318 forked and then it exits. However, note that we may have
2319 earlier deleted a leader of an inferior we're debugging,
2320 in check_zombie_leaders. Re-add it back here if so. */
2321 find_process ([&] (process_info *proc)
2322 {
2323 if (proc->pid == lwpid)
2324 {
2325 threads_debug_printf
2326 ("Re-adding thread group leader LWP %d after exit.",
2327 lwpid);
2328
2329 child = add_lwp (ptid_t (lwpid, lwpid));
2330 return true;
2331 }
2332 return false;
2333 });
5406bc3f 2334 }
94585166 2335
5406bc3f
PA
2336 if (child == nullptr)
2337 return;
fa96cb38 2338 }
fa96cb38
PA
2339
2340 thread = get_lwp_thread (child);
2341
2342 child->stopped = 1;
2343
2344 child->last_status = wstat;
2345
582511be
PA
2346 /* Check if the thread has exited. */
2347 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2348 {
c058728c 2349 threads_debug_printf ("%d exited", lwpid);
f50bf8e5
YQ
2350
2351 if (finish_step_over (child))
2352 {
2353 /* Unsuspend all other LWPs, and set them back running again. */
2354 unsuspend_all_lwps (child);
2355 }
2356
8a841a35
PA
2357 /* If this is not the leader LWP, then the exit signal was not
2358 the end of the debugged application and should be ignored,
2359 unless GDB wants to hear about thread exits. */
48989498 2360 if (report_exit_events_for (thread) || is_leader (thread))
582511be 2361 {
65706a29
PA
2362 /* Since events are serialized to GDB core, and we can't
2363 report this one right now. Leave the status pending for
2364 the next time we're able to report it. */
e8a625d1 2365 mark_lwp_dead (child, wstat, false);
1a48f002 2366 return;
582511be
PA
2367 }
2368 else
2369 {
65706a29 2370 delete_lwp (child);
1a48f002 2371 return;
582511be
PA
2372 }
2373 }
2374
2375 gdb_assert (WIFSTOPPED (wstat));
2376
fa96cb38
PA
2377 if (WIFSTOPPED (wstat))
2378 {
2379 struct process_info *proc;
2380
c06cbd92 2381 /* Architecture-specific setup after inferior is running. */
fa96cb38 2382 proc = find_process_pid (pid_of (thread));
c06cbd92 2383 if (proc->tdesc == NULL)
fa96cb38 2384 {
c06cbd92
YQ
2385 if (proc->attached)
2386 {
c06cbd92
YQ
2387 /* This needs to happen after we have attached to the
2388 inferior and it is stopped for the first time, but
2389 before we access any inferior registers. */
797bcff5 2390 arch_setup_thread (thread);
c06cbd92
YQ
2391 }
2392 else
2393 {
2394 /* The process is started, but GDBserver will do
2395 architecture-specific setup after the program stops at
2396 the first instruction. */
2397 child->status_pending_p = 1;
2398 child->status_pending = wstat;
1a48f002 2399 return;
c06cbd92 2400 }
fa96cb38
PA
2401 }
2402 }
2403
fa96cb38
PA
2404 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2405 {
beed38b8 2406 struct process_info *proc = find_process_pid (pid_of (thread));
de0d863e 2407 int options = linux_low_ptrace_options (proc->attached);
beed38b8 2408
de0d863e 2409 linux_enable_event_reporting (lwpid, options);
fa96cb38
PA
2410 child->must_set_ptrace_flags = 0;
2411 }
2412
82075af2
JS
2413 /* Always update syscall_state, even if it will be filtered later. */
2414 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2415 {
2416 child->syscall_state
2417 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2418 ? TARGET_WAITKIND_SYSCALL_RETURN
2419 : TARGET_WAITKIND_SYSCALL_ENTRY);
2420 }
2421 else
2422 {
2423 /* Almost all other ptrace-stops are known to be outside of system
2424 calls, with further exceptions in handle_extended_wait. */
2425 child->syscall_state = TARGET_WAITKIND_IGNORE;
2426 }
2427
e7ad2f14
PA
2428 /* Be careful to not overwrite stop_pc until save_stop_reason is
2429 called. */
fa96cb38 2430 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
89a5711c 2431 && linux_is_extended_waitstatus (wstat))
fa96cb38 2432 {
582511be 2433 child->stop_pc = get_pc (child);
94585166 2434 if (handle_extended_wait (&child, wstat))
de0d863e
DB
2435 {
2436 /* The event has been handled, so just return without
2437 reporting it. */
1a48f002 2438 return;
de0d863e 2439 }
fa96cb38
PA
2440 }
2441
80aea927 2442 if (linux_wstatus_maybe_breakpoint (wstat))
582511be 2443 {
e7ad2f14 2444 if (save_stop_reason (child))
582511be
PA
2445 have_stop_pc = 1;
2446 }
2447
2448 if (!have_stop_pc)
2449 child->stop_pc = get_pc (child);
2450
fa96cb38
PA
2451 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2452 && child->stop_expected)
2453 {
c058728c
SM
2454 threads_debug_printf ("Expected stop.");
2455
fa96cb38
PA
2456 child->stop_expected = 0;
2457
2458 if (thread->last_resume_kind == resume_stop)
2459 {
2460 /* We want to report the stop to the core. Treat the
2461 SIGSTOP as a normal event. */
c058728c
SM
2462 threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
2463 target_pid_to_str (ptid_of (thread)).c_str ());
fa96cb38
PA
2464 }
2465 else if (stopping_threads != NOT_STOPPING_THREADS)
2466 {
2467 /* Stopping threads. We don't want this SIGSTOP to end up
582511be 2468 pending. */
c058728c
SM
2469 threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
2470 target_pid_to_str (ptid_of (thread)).c_str ());
1a48f002 2471 return;
fa96cb38
PA
2472 }
2473 else
2474 {
2bf6fb9d 2475 /* This is a delayed SIGSTOP. Filter out the event. */
c058728c 2476 threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
2bf6fb9d 2477 child->stepping ? "step" : "continue",
61d7f128 2478 target_pid_to_str (ptid_of (thread)).c_str ());
2bf6fb9d 2479
df95181f 2480 resume_one_lwp (child, child->stepping, 0, NULL);
1a48f002 2481 return;
fa96cb38
PA
2482 }
2483 }
2484
582511be
PA
2485 child->status_pending_p = 1;
2486 child->status_pending = wstat;
1a48f002 2487 return;
fa96cb38
PA
2488}
2489
b31cdfa6
TBA
2490bool
2491linux_process_target::maybe_hw_step (thread_info *thread)
f79b145d 2492{
b31cdfa6
TBA
2493 if (supports_hardware_single_step ())
2494 return true;
f79b145d
YQ
2495 else
2496 {
3b9a79ef 2497 /* GDBserver must insert single-step breakpoint for software
f79b145d 2498 single step. */
3b9a79ef 2499 gdb_assert (has_single_step_breakpoints (thread));
b31cdfa6 2500 return false;
f79b145d
YQ
2501 }
2502}
2503
df95181f
TBA
2504void
2505linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
20ba1ce6 2506{
20ba1ce6
PA
2507 struct lwp_info *lp = get_thread_lwp (thread);
2508
2509 if (lp->stopped
863d01bd 2510 && !lp->suspended
20ba1ce6 2511 && !lp->status_pending_p
183be222 2512 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
20ba1ce6 2513 {
8901d193
YQ
2514 int step = 0;
2515
2516 if (thread->last_resume_kind == resume_step)
b6d8d612
KB
2517 {
2518 if (supports_software_single_step ())
2519 install_software_single_step_breakpoints (lp);
2520
2521 step = maybe_hw_step (thread);
2522 }
20ba1ce6 2523
c058728c
SM
2524 threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
2525 target_pid_to_str (ptid_of (thread)).c_str (),
2526 paddress (lp->stop_pc), step);
20ba1ce6 2527
df95181f 2528 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
20ba1ce6
PA
2529 }
2530}
2531
d16f3f6c
TBA
2532int
2533linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2534 ptid_t filter_ptid,
2535 int *wstatp, int options)
0d62e5e8 2536{
d86d4aaf 2537 struct thread_info *event_thread;
d50171e4 2538 struct lwp_info *event_child, *requested_child;
fa96cb38 2539 sigset_t block_mask, prev_mask;
d50171e4 2540
fa96cb38 2541 retry:
d86d4aaf
DE
2542 /* N.B. event_thread points to the thread_info struct that contains
2543 event_child. Keep them in sync. */
2544 event_thread = NULL;
d50171e4
PA
2545 event_child = NULL;
2546 requested_child = NULL;
0d62e5e8 2547
95954743 2548 /* Check for a lwp with a pending status. */
bd99dc85 2549
d7e15655 2550 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
0d62e5e8 2551 {
83e1b6c1
SM
2552 event_thread = find_thread_in_random ([&] (thread_info *thread)
2553 {
2554 return status_pending_p_callback (thread, filter_ptid);
2555 });
2556
d86d4aaf 2557 if (event_thread != NULL)
c058728c
SM
2558 {
2559 event_child = get_thread_lwp (event_thread);
2560 threads_debug_printf ("Got a pending child %ld", lwpid_of (event_thread));
2561 }
0d62e5e8 2562 }
d7e15655 2563 else if (filter_ptid != null_ptid)
0d62e5e8 2564 {
fa96cb38 2565 requested_child = find_lwp_pid (filter_ptid);
59487af3 2566 gdb_assert (requested_child != nullptr);
d50171e4 2567
bde24c0a 2568 if (stopping_threads == NOT_STOPPING_THREADS
fa593d66 2569 && requested_child->status_pending_p
229d26fc
SM
2570 && (requested_child->collecting_fast_tracepoint
2571 != fast_tpoint_collect_result::not_collecting))
fa593d66
PA
2572 {
2573 enqueue_one_deferred_signal (requested_child,
2574 &requested_child->status_pending);
2575 requested_child->status_pending_p = 0;
2576 requested_child->status_pending = 0;
df95181f 2577 resume_one_lwp (requested_child, 0, 0, NULL);
fa593d66
PA
2578 }
2579
2580 if (requested_child->suspended
2581 && requested_child->status_pending_p)
38e08fca 2582 {
f34652de 2583 internal_error ("requesting an event out of a"
38e08fca
GB
2584 " suspended child?");
2585 }
fa593d66 2586
d50171e4 2587 if (requested_child->status_pending_p)
d86d4aaf
DE
2588 {
2589 event_child = requested_child;
2590 event_thread = get_lwp_thread (event_child);
2591 }
0d62e5e8 2592 }
611cb4a5 2593
0d62e5e8
DJ
2594 if (event_child != NULL)
2595 {
c058728c
SM
2596 threads_debug_printf ("Got an event from pending child %ld (%04x)",
2597 lwpid_of (event_thread),
2598 event_child->status_pending);
2599
fa96cb38 2600 *wstatp = event_child->status_pending;
bd99dc85
PA
2601 event_child->status_pending_p = 0;
2602 event_child->status_pending = 0;
24583e45 2603 switch_to_thread (event_thread);
d86d4aaf 2604 return lwpid_of (event_thread);
0d62e5e8
DJ
2605 }
2606
fa96cb38
PA
2607 /* But if we don't find a pending event, we'll have to wait.
2608
2609 We only enter this loop if no process has a pending wait status.
2610 Thus any action taken in response to a wait status inside this
2611 loop is responding as soon as we detect the status, not after any
2612 pending events. */
d8301ad1 2613
fa96cb38
PA
2614 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2615 all signals while here. */
2616 sigfillset (&block_mask);
21987b9c 2617 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
fa96cb38 2618
582511be
PA
2619 /* Always pull all events out of the kernel. We'll randomly select
2620 an event LWP out of all that have events, to prevent
2621 starvation. */
fa96cb38 2622 while (event_child == NULL)
0d62e5e8 2623 {
fa96cb38 2624 pid_t ret = 0;
0d62e5e8 2625
fa96cb38
PA
2626 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2627 quirks:
0d62e5e8 2628
fa96cb38
PA
2629 - If the thread group leader exits while other threads in the
2630 thread group still exist, waitpid(TGID, ...) hangs. That
2631 waitpid won't return an exit status until the other threads
2632 in the group are reaped.
611cb4a5 2633
fa96cb38
PA
2634 - When a non-leader thread execs, that thread just vanishes
2635 without reporting an exit (so we'd hang if we waited for it
2636 explicitly in that case). The exec event is reported to
94585166 2637 the TGID pid. */
fa96cb38
PA
2638 errno = 0;
2639 ret = my_waitpid (-1, wstatp, options | WNOHANG);
d8301ad1 2640
c058728c
SM
2641 threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
2642 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
0d62e5e8 2643
fa96cb38 2644 if (ret > 0)
0d62e5e8 2645 {
c058728c
SM
2646 threads_debug_printf ("waitpid %ld received %s",
2647 (long) ret, status_to_str (*wstatp).c_str ());
89be2091 2648
582511be
PA
2649 /* Filter all events. IOW, leave all events pending. We'll
2650 randomly select an event LWP out of all that have events
2651 below. */
d16f3f6c 2652 filter_event (ret, *wstatp);
fa96cb38
PA
2653 /* Retry until nothing comes out of waitpid. A single
2654 SIGCHLD can indicate more than one child stopped. */
89be2091
DJ
2655 continue;
2656 }
2657
20ba1ce6
PA
2658 /* Now that we've pulled all events out of the kernel, resume
2659 LWPs that don't have an interesting event to report. */
2660 if (stopping_threads == NOT_STOPPING_THREADS)
df95181f
TBA
2661 for_each_thread ([this] (thread_info *thread)
2662 {
2663 resume_stopped_resumed_lwps (thread);
2664 });
20ba1ce6
PA
2665
2666 /* ... and find an LWP with a status to report to the core, if
2667 any. */
83e1b6c1
SM
2668 event_thread = find_thread_in_random ([&] (thread_info *thread)
2669 {
2670 return status_pending_p_callback (thread, filter_ptid);
2671 });
2672
582511be
PA
2673 if (event_thread != NULL)
2674 {
2675 event_child = get_thread_lwp (event_thread);
2676 *wstatp = event_child->status_pending;
2677 event_child->status_pending_p = 0;
2678 event_child->status_pending = 0;
2679 break;
2680 }
2681
fa96cb38
PA
2682 /* Check for zombie thread group leaders. Those can't be reaped
2683 until all other threads in the thread group are. */
e8a625d1
PA
2684 if (check_zombie_leaders ())
2685 goto retry;
fa96cb38 2686
a1385b7b
SM
2687 auto not_stopped = [&] (thread_info *thread)
2688 {
2689 return not_stopped_callback (thread, wait_ptid);
2690 };
2691
fa96cb38
PA
2692 /* If there are no resumed children left in the set of LWPs we
2693 want to wait for, bail. We can't just block in
2694 waitpid/sigsuspend, because lwps might have been left stopped
2695 in trace-stop state, and we'd be stuck forever waiting for
2696 their status to change (which would only happen if we resumed
2697 them). Even if WNOHANG is set, this return code is preferred
2698 over 0 (below), as it is more detailed. */
a1385b7b 2699 if (find_thread (not_stopped) == NULL)
a6dbe5df 2700 {
c058728c
SM
2701 threads_debug_printf ("exit (no unwaited-for LWP)");
2702
21987b9c 2703 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
fa96cb38 2704 return -1;
a6dbe5df
PA
2705 }
2706
fa96cb38
PA
2707 /* No interesting event to report to the caller. */
2708 if ((options & WNOHANG))
24a09b5f 2709 {
c058728c 2710 threads_debug_printf ("WNOHANG set, no event found");
fa96cb38 2711
21987b9c 2712 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
fa96cb38 2713 return 0;
24a09b5f
DJ
2714 }
2715
fa96cb38 2716 /* Block until we get an event reported with SIGCHLD. */
c058728c 2717 threads_debug_printf ("sigsuspend'ing");
d50171e4 2718
fa96cb38 2719 sigsuspend (&prev_mask);
21987b9c 2720 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
fa96cb38
PA
2721 goto retry;
2722 }
d50171e4 2723
21987b9c 2724 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
d50171e4 2725
24583e45 2726 switch_to_thread (event_thread);
d50171e4 2727
fa96cb38
PA
2728 return lwpid_of (event_thread);
2729}
2730
d16f3f6c
TBA
2731int
2732linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
fa96cb38 2733{
d16f3f6c 2734 return wait_for_event_filtered (ptid, ptid, wstatp, options);
611cb4a5
DJ
2735}
2736
6bf5e0ba
PA
2737/* Select one LWP out of those that have events pending. */
2738
2739static void
2740select_event_lwp (struct lwp_info **orig_lp)
2741{
582511be
PA
2742 struct thread_info *event_thread = NULL;
2743
2744 /* In all-stop, give preference to the LWP that is being
2745 single-stepped. There will be at most one, and it's the LWP that
2746 the core is most interested in. If we didn't do this, then we'd
2747 have to handle pending step SIGTRAPs somehow in case the core
2748 later continues the previously-stepped thread, otherwise we'd
2749 report the pending SIGTRAP, and the core, not having stepped the
2750 thread, wouldn't understand what the trap was for, and therefore
2751 would report it to the user as a random signal. */
2752 if (!non_stop)
6bf5e0ba 2753 {
39a64da5
SM
2754 event_thread = find_thread ([] (thread_info *thread)
2755 {
2756 lwp_info *lp = get_thread_lwp (thread);
2757
183be222 2758 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
39a64da5
SM
2759 && thread->last_resume_kind == resume_step
2760 && lp->status_pending_p);
2761 });
2762
582511be 2763 if (event_thread != NULL)
c058728c
SM
2764 threads_debug_printf
2765 ("Select single-step %s",
2766 target_pid_to_str (ptid_of (event_thread)).c_str ());
6bf5e0ba 2767 }
582511be 2768 if (event_thread == NULL)
6bf5e0ba
PA
2769 {
2770 /* No single-stepping LWP. Select one at random, out of those
dda83cd7 2771 which have had events. */
6bf5e0ba 2772
b0319eaa 2773 event_thread = find_thread_in_random ([&] (thread_info *thread)
39a64da5
SM
2774 {
2775 lwp_info *lp = get_thread_lwp (thread);
2776
b0319eaa 2777 /* Only resumed LWPs that have an event pending. */
183be222 2778 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
b0319eaa 2779 && lp->status_pending_p);
39a64da5 2780 });
6bf5e0ba
PA
2781 }
2782
d86d4aaf 2783 if (event_thread != NULL)
6bf5e0ba 2784 {
d86d4aaf
DE
2785 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2786
6bf5e0ba
PA
2787 /* Switch the event LWP. */
2788 *orig_lp = event_lp;
2789 }
2790}
2791
7984d532
PA
2792/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2793 NULL. */
2794
2795static void
2796unsuspend_all_lwps (struct lwp_info *except)
2797{
139720c5
SM
2798 for_each_thread ([&] (thread_info *thread)
2799 {
2800 lwp_info *lwp = get_thread_lwp (thread);
2801
2802 if (lwp != except)
2803 lwp_suspended_decr (lwp);
2804 });
7984d532
PA
2805}
2806
5a6b0a41 2807static bool lwp_running (thread_info *thread);
fa593d66
PA
2808
2809/* Stabilize threads (move out of jump pads).
2810
2811 If a thread is midway collecting a fast tracepoint, we need to
2812 finish the collection and move it out of the jump pad before
2813 reporting the signal.
2814
2815 This avoids recursion while collecting (when a signal arrives
2816 midway, and the signal handler itself collects), which would trash
2817 the trace buffer. In case the user set a breakpoint in a signal
2818 handler, this avoids the backtrace showing the jump pad, etc..
2819 Most importantly, there are certain things we can't do safely if
2820 threads are stopped in a jump pad (or in its callee's). For
2821 example:
2822
2823 - starting a new trace run. A thread still collecting the
2824 previous run, could trash the trace buffer when resumed. The trace
2825 buffer control structures would have been reset but the thread had
2826 no way to tell. The thread could even midway memcpy'ing to the
2827 buffer, which would mean that when resumed, it would clobber the
2828 trace buffer that had been set for a new run.
2829
2830 - we can't rewrite/reuse the jump pads for new tracepoints
2831 safely. Say you do tstart while a thread is stopped midway while
2832 collecting. When the thread is later resumed, it finishes the
2833 collection, and returns to the jump pad, to execute the original
2834 instruction that was under the tracepoint jump at the time the
2835 older run had been started. If the jump pad had been rewritten
2836 since for something else in the new run, the thread would now
2837 execute the wrong / random instructions. */
2838
5c9eb2f2
TBA
2839void
2840linux_process_target::stabilize_threads ()
fa593d66 2841{
13e567af
TBA
2842 thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
2843 {
2844 return stuck_in_jump_pad (thread);
2845 });
fa593d66 2846
d86d4aaf 2847 if (thread_stuck != NULL)
fa593d66 2848 {
c058728c
SM
2849 threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
2850 lwpid_of (thread_stuck));
fa593d66
PA
2851 return;
2852 }
2853
24583e45 2854 scoped_restore_current_thread restore_thread;
fa593d66
PA
2855
2856 stabilizing_threads = 1;
2857
2858 /* Kick 'em all. */
d16f3f6c
TBA
2859 for_each_thread ([this] (thread_info *thread)
2860 {
2861 move_out_of_jump_pad (thread);
2862 });
fa593d66
PA
2863
2864 /* Loop until all are stopped out of the jump pads. */
5a6b0a41 2865 while (find_thread (lwp_running) != NULL)
fa593d66
PA
2866 {
2867 struct target_waitstatus ourstatus;
2868 struct lwp_info *lwp;
fa593d66
PA
2869 int wstat;
2870
2871 /* Note that we go through the full wait even loop. While
2872 moving threads out of jump pad, we need to be able to step
2873 over internal breakpoints and such. */
d16f3f6c 2874 wait_1 (minus_one_ptid, &ourstatus, 0);
fa593d66 2875
183be222 2876 if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
fa593d66 2877 {
0bfdf32f 2878 lwp = get_thread_lwp (current_thread);
fa593d66
PA
2879
2880 /* Lock it. */
863d01bd 2881 lwp_suspended_inc (lwp);
fa593d66 2882
183be222 2883 if (ourstatus.sig () != GDB_SIGNAL_0
0bfdf32f 2884 || current_thread->last_resume_kind == resume_stop)
fa593d66 2885 {
183be222 2886 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
fa593d66
PA
2887 enqueue_one_deferred_signal (lwp, &wstat);
2888 }
2889 }
2890 }
2891
fcdad592 2892 unsuspend_all_lwps (NULL);
fa593d66
PA
2893
2894 stabilizing_threads = 0;
2895
b4d51a55 2896 if (debug_threads)
fa593d66 2897 {
13e567af
TBA
2898 thread_stuck = find_thread ([this] (thread_info *thread)
2899 {
2900 return stuck_in_jump_pad (thread);
2901 });
fcb056a5 2902
d86d4aaf 2903 if (thread_stuck != NULL)
c058728c
SM
2904 threads_debug_printf
2905 ("couldn't stabilize, LWP %ld got stuck in jump pad",
2906 lwpid_of (thread_stuck));
fa593d66
PA
2907 }
2908}
2909
582511be
PA
2910/* Convenience function that is called when the kernel reports an
2911 event that is not passed out to GDB. */
2912
2913static ptid_t
2914ignore_event (struct target_waitstatus *ourstatus)
2915{
2916 /* If we got an event, there may still be others, as a single
2917 SIGCHLD can indicate more than one child stopped. This forces
2918 another target_wait call. */
2919 async_file_mark ();
2920
183be222 2921 ourstatus->set_ignore ();
582511be
PA
2922 return null_ptid;
2923}
2924
fd000fb3
TBA
2925ptid_t
2926linux_process_target::filter_exit_event (lwp_info *event_child,
2927 target_waitstatus *ourstatus)
65706a29
PA
2928{
2929 struct thread_info *thread = get_lwp_thread (event_child);
2930 ptid_t ptid = ptid_of (thread);
2931
e8a625d1
PA
2932 if (ourstatus->kind () == TARGET_WAITKIND_THREAD_EXITED)
2933 {
2934 /* We're reporting a thread exit for the leader. The exit was
2935 detected by check_zombie_leaders. */
2936 gdb_assert (is_leader (thread));
2937 gdb_assert (report_exit_events_for (thread));
2938
2939 delete_lwp (event_child);
2940 return ptid;
2941 }
2942
48989498
PA
2943 /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
2944 if a non-leader thread exits with a signal, we'd report it to the
2945 core which would interpret it as the whole-process exiting.
2946 There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind. */
2947 if (ourstatus->kind () != TARGET_WAITKIND_EXITED
2948 && ourstatus->kind () != TARGET_WAITKIND_SIGNALLED)
2949 return ptid;
2950
8a841a35 2951 if (!is_leader (thread))
65706a29 2952 {
48989498 2953 if (report_exit_events_for (thread))
183be222 2954 ourstatus->set_thread_exited (0);
65706a29 2955 else
183be222 2956 ourstatus->set_ignore ();
65706a29
PA
2957
2958 delete_lwp (event_child);
2959 }
2960 return ptid;
2961}
2962
82075af2
JS
2963/* Returns 1 if GDB is interested in any event_child syscalls. */
2964
2965static int
2966gdb_catching_syscalls_p (struct lwp_info *event_child)
2967{
2968 struct thread_info *thread = get_lwp_thread (event_child);
2969 struct process_info *proc = get_thread_process (thread);
2970
f27866ba 2971 return !proc->syscalls_to_catch.empty ();
82075af2
JS
2972}
2973
9eedd27d
TBA
2974bool
2975linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
82075af2 2976{
4cc32bec 2977 int sysno;
82075af2
JS
2978 struct thread_info *thread = get_lwp_thread (event_child);
2979 struct process_info *proc = get_thread_process (thread);
2980
f27866ba 2981 if (proc->syscalls_to_catch.empty ())
9eedd27d 2982 return false;
82075af2 2983
f27866ba 2984 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
9eedd27d 2985 return true;
82075af2 2986
4cc32bec 2987 get_syscall_trapinfo (event_child, &sysno);
f27866ba
SM
2988
2989 for (int iter : proc->syscalls_to_catch)
82075af2 2990 if (iter == sysno)
9eedd27d 2991 return true;
82075af2 2992
9eedd27d 2993 return false;
82075af2
JS
2994}
2995
d16f3f6c
TBA
2996ptid_t
2997linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
b60cea74 2998 target_wait_flags target_options)
da6d8c04 2999{
c058728c
SM
3000 THREADS_SCOPED_DEBUG_ENTER_EXIT;
3001
c12a5089 3002 client_state &cs = get_client_state ();
e5f1222d 3003 int w;
fc7238bb 3004 struct lwp_info *event_child;
bd99dc85 3005 int options;
bd99dc85 3006 int pid;
6bf5e0ba
PA
3007 int step_over_finished;
3008 int bp_explains_trap;
3009 int maybe_internal_trap;
3010 int report_to_gdb;
219f2f23 3011 int trace_event;
c2d6af84 3012 int in_step_range;
bd99dc85 3013
c058728c 3014 threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());
87ce2a04 3015
bd99dc85
PA
3016 /* Translate generic target options into linux options. */
3017 options = __WALL;
3018 if (target_options & TARGET_WNOHANG)
3019 options |= WNOHANG;
0d62e5e8 3020
fa593d66
PA
3021 bp_explains_trap = 0;
3022 trace_event = 0;
c2d6af84 3023 in_step_range = 0;
183be222 3024 ourstatus->set_ignore ();
bd99dc85 3025
ef980d65 3026 bool was_any_resumed = any_resumed ();
f2faf941 3027
d7e15655 3028 if (step_over_bkpt == null_ptid)
d16f3f6c 3029 pid = wait_for_event (ptid, &w, options);
6bf5e0ba
PA
3030 else
3031 {
c058728c
SM
3032 threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
3033 target_pid_to_str (step_over_bkpt).c_str ());
d16f3f6c 3034 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
6bf5e0ba
PA
3035 }
3036
ef980d65 3037 if (pid == 0 || (pid == -1 && !was_any_resumed))
87ce2a04 3038 {
fa96cb38
PA
3039 gdb_assert (target_options & TARGET_WNOHANG);
3040
c058728c 3041 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");
fa96cb38 3042
183be222 3043 ourstatus->set_ignore ();
87ce2a04
DE
3044 return null_ptid;
3045 }
fa96cb38
PA
3046 else if (pid == -1)
3047 {
c058728c 3048 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");
bd99dc85 3049
183be222 3050 ourstatus->set_no_resumed ();
fa96cb38
PA
3051 return null_ptid;
3052 }
0d62e5e8 3053
0bfdf32f 3054 event_child = get_thread_lwp (current_thread);
0d62e5e8 3055
d16f3f6c 3056 /* wait_for_event only returns an exit status for the last
fa96cb38
PA
3057 child of a process. Report it. */
3058 if (WIFEXITED (w) || WIFSIGNALED (w))
da6d8c04 3059 {
fa96cb38 3060 if (WIFEXITED (w))
0d62e5e8 3061 {
e8a625d1
PA
3062 /* If we already have the exit recorded in waitstatus, use
3063 it. This will happen when we detect a zombie leader,
3064 when we had GDB_THREAD_OPTION_EXIT enabled for it. We
3065 want to report its exit as TARGET_WAITKIND_THREAD_EXITED,
3066 as the whole process hasn't exited yet. */
3067 const target_waitstatus &ws = event_child->waitstatus;
3068 if (ws.kind () != TARGET_WAITKIND_IGNORE)
3069 {
3070 gdb_assert (ws.kind () == TARGET_WAITKIND_EXITED
3071 || ws.kind () == TARGET_WAITKIND_THREAD_EXITED);
3072 *ourstatus = ws;
3073 }
3074 else
3075 ourstatus->set_exited (WEXITSTATUS (w));
bd99dc85 3076
c058728c
SM
3077 threads_debug_printf
3078 ("ret = %s, exited with retcode %d",
3079 target_pid_to_str (ptid_of (current_thread)).c_str (),
3080 WEXITSTATUS (w));
fa96cb38
PA
3081 }
3082 else
3083 {
183be222 3084 ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));
5b1c542e 3085
c058728c
SM
3086 threads_debug_printf
3087 ("ret = %s, terminated with signal %d",
3088 target_pid_to_str (ptid_of (current_thread)).c_str (),
3089 WTERMSIG (w));
0d62e5e8 3090 }
fa96cb38 3091
48989498 3092 return filter_exit_event (event_child, ourstatus);
da6d8c04
DJ
3093 }
3094
2d97cd35
AT
3095 /* If step-over executes a breakpoint instruction, in the case of a
3096 hardware single step it means a gdb/gdbserver breakpoint had been
3097 planted on top of a permanent breakpoint, in the case of a software
3098 single step it may just mean that gdbserver hit the reinsert breakpoint.
e7ad2f14 3099 The PC has been adjusted by save_stop_reason to point at
2d97cd35
AT
3100 the breakpoint address.
3101 So in the case of the hardware single step advance the PC manually
3102 past the breakpoint and in the case of software single step advance only
3b9a79ef 3103 if it's not the single_step_breakpoint we are hitting.
2d97cd35
AT
3104 This avoids that a program would keep trapping a permanent breakpoint
3105 forever. */
d7e15655 3106 if (step_over_bkpt != null_ptid
2d97cd35
AT
3107 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3108 && (event_child->stepping
3b9a79ef 3109 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
8090aef2 3110 {
dd373349
AT
3111 int increment_pc = 0;
3112 int breakpoint_kind = 0;
3113 CORE_ADDR stop_pc = event_child->stop_pc;
3114
d16f3f6c
TBA
3115 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3116 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
8090aef2 3117
c058728c
SM
3118 threads_debug_printf
3119 ("step-over for %s executed software breakpoint",
3120 target_pid_to_str (ptid_of (current_thread)).c_str ());
8090aef2
PA
3121
3122 if (increment_pc != 0)
3123 {
3124 struct regcache *regcache
3125 = get_thread_regcache (current_thread, 1);
3126
3127 event_child->stop_pc += increment_pc;
bf9ae9d8 3128 low_set_pc (regcache, event_child->stop_pc);
8090aef2 3129
d7146cda 3130 if (!low_breakpoint_at (event_child->stop_pc))
15c66dd6 3131 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
8090aef2
PA
3132 }
3133 }
3134
6bf5e0ba
PA
3135 /* If this event was not handled before, and is not a SIGTRAP, we
3136 report it. SIGILL and SIGSEGV are also treated as traps in case
3137 a breakpoint is inserted at the current PC. If this target does
3138 not support internal breakpoints at all, we also report the
3139 SIGTRAP without further processing; it's of no concern to us. */
3140 maybe_internal_trap
bf9ae9d8 3141 = (low_supports_breakpoints ()
6bf5e0ba
PA
3142 && (WSTOPSIG (w) == SIGTRAP
3143 || ((WSTOPSIG (w) == SIGILL
3144 || WSTOPSIG (w) == SIGSEGV)
d7146cda 3145 && low_breakpoint_at (event_child->stop_pc))));
6bf5e0ba
PA
3146
3147 if (maybe_internal_trap)
3148 {
3149 /* Handle anything that requires bookkeeping before deciding to
3150 report the event or continue waiting. */
3151
3152 /* First check if we can explain the SIGTRAP with an internal
3153 breakpoint, or if we should possibly report the event to GDB.
3154 Do this before anything that may remove or insert a
3155 breakpoint. */
3156 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3157
3158 /* We have a SIGTRAP, possibly a step-over dance has just
3159 finished. If so, tweak the state machine accordingly,
3b9a79ef
YQ
3160 reinsert breakpoints and delete any single-step
3161 breakpoints. */
6bf5e0ba
PA
3162 step_over_finished = finish_step_over (event_child);
3163
3164 /* Now invoke the callbacks of any internal breakpoints there. */
3165 check_breakpoints (event_child->stop_pc);
3166
219f2f23
PA
3167 /* Handle tracepoint data collecting. This may overflow the
3168 trace buffer, and cause a tracing stop, removing
3169 breakpoints. */
3170 trace_event = handle_tracepoints (event_child);
3171
6bf5e0ba 3172 if (bp_explains_trap)
c058728c 3173 threads_debug_printf ("Hit a gdbserver breakpoint.");
6bf5e0ba
PA
3174 }
3175 else
3176 {
3177 /* We have some other signal, possibly a step-over dance was in
3178 progress, and it should be cancelled too. */
3179 step_over_finished = finish_step_over (event_child);
fa593d66
PA
3180 }
3181
3182 /* We have all the data we need. Either report the event to GDB, or
3183 resume threads and keep waiting for more. */
3184
3185 /* If we're collecting a fast tracepoint, finish the collection and
3186 move out of the jump pad before delivering a signal. See
3187 linux_stabilize_threads. */
3188
3189 if (WIFSTOPPED (w)
3190 && WSTOPSIG (w) != SIGTRAP
3191 && supports_fast_tracepoints ()
58b4daa5 3192 && agent_loaded_p ())
fa593d66 3193 {
c058728c
SM
3194 threads_debug_printf ("Got signal %d for LWP %ld. Check if we need "
3195 "to defer or adjust it.",
3196 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
3197
3198 /* Allow debugging the jump pad itself. */
0bfdf32f 3199 if (current_thread->last_resume_kind != resume_step
fa593d66
PA
3200 && maybe_move_out_of_jump_pad (event_child, &w))
3201 {
3202 enqueue_one_deferred_signal (event_child, &w);
3203
c058728c
SM
3204 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3205 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66 3206
df95181f 3207 resume_one_lwp (event_child, 0, 0, NULL);
582511be
PA
3208
3209 return ignore_event (ourstatus);
fa593d66
PA
3210 }
3211 }
219f2f23 3212
229d26fc
SM
3213 if (event_child->collecting_fast_tracepoint
3214 != fast_tpoint_collect_result::not_collecting)
fa593d66 3215 {
c058728c
SM
3216 threads_debug_printf
3217 ("LWP %ld was trying to move out of the jump pad (%d). "
3218 "Check if we're already there.",
3219 lwpid_of (current_thread),
3220 (int) event_child->collecting_fast_tracepoint);
fa593d66
PA
3221
3222 trace_event = 1;
3223
3224 event_child->collecting_fast_tracepoint
3225 = linux_fast_tracepoint_collecting (event_child, NULL);
3226
229d26fc
SM
3227 if (event_child->collecting_fast_tracepoint
3228 != fast_tpoint_collect_result::before_insn)
fa593d66
PA
3229 {
3230 /* No longer need this breakpoint. */
3231 if (event_child->exit_jump_pad_bkpt != NULL)
3232 {
c058728c
SM
3233 threads_debug_printf
3234 ("No longer need exit-jump-pad bkpt; removing it."
3235 "stopping all threads momentarily.");
fa593d66
PA
3236
3237 /* Other running threads could hit this breakpoint.
3238 We don't handle moribund locations like GDB does,
3239 instead we always pause all threads when removing
3240 breakpoints, so that any step-over or
3241 decr_pc_after_break adjustment is always taken
3242 care of while the breakpoint is still
3243 inserted. */
3244 stop_all_lwps (1, event_child);
fa593d66
PA
3245
3246 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3247 event_child->exit_jump_pad_bkpt = NULL;
3248
3249 unstop_all_lwps (1, event_child);
3250
3251 gdb_assert (event_child->suspended >= 0);
3252 }
3253 }
3254
229d26fc
SM
3255 if (event_child->collecting_fast_tracepoint
3256 == fast_tpoint_collect_result::not_collecting)
fa593d66 3257 {
c058728c
SM
3258 threads_debug_printf
3259 ("fast tracepoint finished collecting successfully.");
fa593d66
PA
3260
3261 /* We may have a deferred signal to report. */
3262 if (dequeue_one_deferred_signal (event_child, &w))
c058728c 3263 threads_debug_printf ("dequeued one signal.");
3c11dd79 3264 else
fa593d66 3265 {
c058728c 3266 threads_debug_printf ("no deferred signals.");
fa593d66
PA
3267
3268 if (stabilizing_threads)
3269 {
183be222 3270 ourstatus->set_stopped (GDB_SIGNAL_0);
87ce2a04 3271
c058728c
SM
3272 threads_debug_printf
3273 ("ret = %s, stopped while stabilizing threads",
3274 target_pid_to_str (ptid_of (current_thread)).c_str ());
87ce2a04 3275
0bfdf32f 3276 return ptid_of (current_thread);
fa593d66
PA
3277 }
3278 }
3279 }
6bf5e0ba
PA
3280 }
3281
e471f25b
PA
3282 /* Check whether GDB would be interested in this event. */
3283
82075af2
JS
3284 /* Check if GDB is interested in this syscall. */
3285 if (WIFSTOPPED (w)
3286 && WSTOPSIG (w) == SYSCALL_SIGTRAP
9eedd27d 3287 && !gdb_catch_this_syscall (event_child))
82075af2 3288 {
c058728c
SM
3289 threads_debug_printf ("Ignored syscall for LWP %ld.",
3290 lwpid_of (current_thread));
82075af2 3291
df95181f 3292 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
edeeb602 3293
82075af2
JS
3294 return ignore_event (ourstatus);
3295 }
3296
e471f25b
PA
3297 /* If GDB is not interested in this signal, don't stop other
3298 threads, and don't report it to GDB. Just resume the inferior
3299 right away. We do this for threading-related signals as well as
3300 any that GDB specifically requested we ignore. But never ignore
3301 SIGSTOP if we sent it ourselves, and do not ignore signals when
3302 stepping - they may require special handling to skip the signal
c9587f88
AT
3303 handler. Also never ignore signals that could be caused by a
3304 breakpoint. */
e471f25b 3305 if (WIFSTOPPED (w)
0bfdf32f 3306 && current_thread->last_resume_kind != resume_step
e471f25b 3307 && (
1a981360 3308#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
fe978cb0 3309 (current_process ()->priv->thread_db != NULL
e471f25b
PA
3310 && (WSTOPSIG (w) == __SIGRTMIN
3311 || WSTOPSIG (w) == __SIGRTMIN + 1))
3312 ||
3313#endif
c12a5089 3314 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
e471f25b 3315 && !(WSTOPSIG (w) == SIGSTOP
c9587f88
AT
3316 && current_thread->last_resume_kind == resume_stop)
3317 && !linux_wstatus_maybe_breakpoint (w))))
e471f25b
PA
3318 {
3319 siginfo_t info, *info_p;
3320
c058728c
SM
3321 threads_debug_printf ("Ignored signal %d for LWP %ld.",
3322 WSTOPSIG (w), lwpid_of (current_thread));
e471f25b 3323
0bfdf32f 3324 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 3325 (PTRACE_TYPE_ARG3) 0, &info) == 0)
e471f25b
PA
3326 info_p = &info;
3327 else
3328 info_p = NULL;
863d01bd
PA
3329
3330 if (step_over_finished)
3331 {
3332 /* We cancelled this thread's step-over above. We still
3333 need to unsuspend all other LWPs, and set them back
3334 running again while the signal handler runs. */
3335 unsuspend_all_lwps (event_child);
3336
3337 /* Enqueue the pending signal info so that proceed_all_lwps
3338 doesn't lose it. */
3339 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3340
3341 proceed_all_lwps ();
3342 }
3343 else
3344 {
df95181f
TBA
3345 resume_one_lwp (event_child, event_child->stepping,
3346 WSTOPSIG (w), info_p);
863d01bd 3347 }
edeeb602 3348
582511be 3349 return ignore_event (ourstatus);
e471f25b
PA
3350 }
3351
c2d6af84
PA
3352 /* Note that all addresses are always "out of the step range" when
3353 there's no range to begin with. */
3354 in_step_range = lwp_in_step_range (event_child);
3355
3356 /* If GDB wanted this thread to single step, and the thread is out
3357 of the step range, we always want to report the SIGTRAP, and let
3358 GDB handle it. Watchpoints should always be reported. So should
3359 signals we can't explain. A SIGTRAP we can't explain could be a
3360 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3361 do, we're be able to handle GDB breakpoints on top of internal
3362 breakpoints, by handling the internal breakpoint and still
3363 reporting the event to GDB. If we don't, we're out of luck, GDB
863d01bd
PA
3364 won't see the breakpoint hit. If we see a single-step event but
3365 the thread should be continuing, don't pass the trap to gdb.
3366 That indicates that we had previously finished a single-step but
3367 left the single-step pending -- see
3368 complete_ongoing_step_over. */
6bf5e0ba 3369 report_to_gdb = (!maybe_internal_trap
0bfdf32f 3370 || (current_thread->last_resume_kind == resume_step
c2d6af84 3371 && !in_step_range)
15c66dd6 3372 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
863d01bd
PA
3373 || (!in_step_range
3374 && !bp_explains_trap
3375 && !trace_event
3376 && !step_over_finished
3377 && !(current_thread->last_resume_kind == resume_continue
3378 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
9f3a5c85 3379 || (gdb_breakpoint_here (event_child->stop_pc)
d3ce09f5 3380 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
de0d863e 3381 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
183be222 3382 || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);
d3ce09f5
SS
3383
3384 run_breakpoint_commands (event_child->stop_pc);
6bf5e0ba
PA
3385
3386 /* We found no reason GDB would want us to stop. We either hit one
3387 of our own breakpoints, or finished an internal step GDB
3388 shouldn't know about. */
3389 if (!report_to_gdb)
3390 {
c058728c
SM
3391 if (bp_explains_trap)
3392 threads_debug_printf ("Hit a gdbserver breakpoint.");
3393
3394 if (step_over_finished)
3395 threads_debug_printf ("Step-over finished.");
3396
3397 if (trace_event)
3398 threads_debug_printf ("Tracepoint event.");
3399
3400 if (lwp_in_step_range (event_child))
3401 threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
3402 paddress (event_child->stop_pc),
3403 paddress (event_child->step_range_start),
3404 paddress (event_child->step_range_end));
6bf5e0ba
PA
3405
3406 /* We're not reporting this breakpoint to GDB, so apply the
3407 decr_pc_after_break adjustment to the inferior's regcache
3408 ourselves. */
3409
bf9ae9d8 3410 if (low_supports_breakpoints ())
6bf5e0ba
PA
3411 {
3412 struct regcache *regcache
0bfdf32f 3413 = get_thread_regcache (current_thread, 1);
bf9ae9d8 3414 low_set_pc (regcache, event_child->stop_pc);
6bf5e0ba
PA
3415 }
3416
7984d532 3417 if (step_over_finished)
e3652c84
YQ
3418 {
3419 /* If we have finished stepping over a breakpoint, we've
3420 stopped and suspended all LWPs momentarily except the
3421 stepping one. This is where we resume them all again.
3422 We're going to keep waiting, so use proceed, which
3423 handles stepping over the next breakpoint. */
3424 unsuspend_all_lwps (event_child);
3425 }
3426 else
3427 {
3428 /* Remove the single-step breakpoints if any. Note that
3429 there isn't single-step breakpoint if we finished stepping
3430 over. */
7582c77c 3431 if (supports_software_single_step ()
e3652c84
YQ
3432 && has_single_step_breakpoints (current_thread))
3433 {
3434 stop_all_lwps (0, event_child);
3435 delete_single_step_breakpoints (current_thread);
3436 unstop_all_lwps (0, event_child);
3437 }
3438 }
7984d532 3439
c058728c 3440 threads_debug_printf ("proceeding all threads.");
edeeb602 3441
c058728c 3442 proceed_all_lwps ();
edeeb602 3443
582511be 3444 return ignore_event (ourstatus);
6bf5e0ba
PA
3445 }
3446
c058728c
SM
3447 if (debug_threads)
3448 {
3449 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3450 threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
3451 lwpid_of (get_lwp_thread (event_child)),
3452 event_child->waitstatus.to_string ().c_str ());
3453
3454 if (current_thread->last_resume_kind == resume_step)
3455 {
3456 if (event_child->step_range_start == event_child->step_range_end)
3457 threads_debug_printf
3458 ("GDB wanted to single-step, reporting event.");
3459 else if (!lwp_in_step_range (event_child))
3460 threads_debug_printf ("Out of step range, reporting event.");
3461 }
3462
3463 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3464 threads_debug_printf ("Stopped by watchpoint.");
3465 else if (gdb_breakpoint_here (event_child->stop_pc))
3466 threads_debug_printf ("Stopped by GDB breakpoint.");
3467 }
3468
3469 threads_debug_printf ("Hit a non-gdbserver trap event.");
6bf5e0ba
PA
3470
3471 /* Alright, we're going to report a stop. */
3472
3b9a79ef 3473 /* Remove single-step breakpoints. */
7582c77c 3474 if (supports_software_single_step ())
8901d193 3475 {
3b9a79ef 3476 /* Remove single-step breakpoints or not. It it is true, stop all
8901d193
YQ
3477 lwps, so that other threads won't hit the breakpoint in the
3478 staled memory. */
3b9a79ef 3479 int remove_single_step_breakpoints_p = 0;
8901d193
YQ
3480
3481 if (non_stop)
3482 {
3b9a79ef
YQ
3483 remove_single_step_breakpoints_p
3484 = has_single_step_breakpoints (current_thread);
8901d193
YQ
3485 }
3486 else
3487 {
3488 /* In all-stop, a stop reply cancels all previous resume
3b9a79ef 3489 requests. Delete all single-step breakpoints. */
8901d193 3490
9c80ecd6
SM
3491 find_thread ([&] (thread_info *thread) {
3492 if (has_single_step_breakpoints (thread))
3493 {
3494 remove_single_step_breakpoints_p = 1;
3495 return true;
3496 }
8901d193 3497
9c80ecd6
SM
3498 return false;
3499 });
8901d193
YQ
3500 }
3501
3b9a79ef 3502 if (remove_single_step_breakpoints_p)
8901d193 3503 {
3b9a79ef 3504 /* If we remove single-step breakpoints from memory, stop all lwps,
8901d193
YQ
3505 so that other threads won't hit the breakpoint in the staled
3506 memory. */
3507 stop_all_lwps (0, event_child);
3508
3509 if (non_stop)
3510 {
3b9a79ef
YQ
3511 gdb_assert (has_single_step_breakpoints (current_thread));
3512 delete_single_step_breakpoints (current_thread);
8901d193
YQ
3513 }
3514 else
3515 {
9c80ecd6
SM
3516 for_each_thread ([] (thread_info *thread){
3517 if (has_single_step_breakpoints (thread))
3518 delete_single_step_breakpoints (thread);
3519 });
8901d193
YQ
3520 }
3521
3522 unstop_all_lwps (0, event_child);
3523 }
3524 }
3525
582511be 3526 if (!stabilizing_threads)
6bf5e0ba
PA
3527 {
3528 /* In all-stop, stop all threads. */
582511be
PA
3529 if (!non_stop)
3530 stop_all_lwps (0, NULL);
6bf5e0ba 3531
c03e6ccc 3532 if (step_over_finished)
582511be
PA
3533 {
3534 if (!non_stop)
3535 {
3536 /* If we were doing a step-over, all other threads but
3537 the stepping one had been paused in start_step_over,
3538 with their suspend counts incremented. We don't want
3539 to do a full unstop/unpause, because we're in
3540 all-stop mode (so we want threads stopped), but we
3541 still need to unsuspend the other threads, to
3542 decrement their `suspended' count back. */
3543 unsuspend_all_lwps (event_child);
3544 }
3545 else
3546 {
3547 /* If we just finished a step-over, then all threads had
3548 been momentarily paused. In all-stop, that's fine,
3549 we want threads stopped by now anyway. In non-stop,
3550 we need to re-resume threads that GDB wanted to be
3551 running. */
3552 unstop_all_lwps (1, event_child);
3553 }
3554 }
c03e6ccc 3555
3aa5cfa0
AT
3556 /* If we're not waiting for a specific LWP, choose an event LWP
3557 from among those that have had events. Giving equal priority
3558 to all LWPs that have had events helps prevent
3559 starvation. */
d7e15655 3560 if (ptid == minus_one_ptid)
3aa5cfa0
AT
3561 {
3562 event_child->status_pending_p = 1;
3563 event_child->status_pending = w;
3564
3565 select_event_lwp (&event_child);
3566
3567 /* current_thread and event_child must stay in sync. */
24583e45 3568 switch_to_thread (get_lwp_thread (event_child));
3aa5cfa0
AT
3569
3570 event_child->status_pending_p = 0;
3571 w = event_child->status_pending;
3572 }
3573
3574
fa593d66 3575 /* Stabilize threads (move out of jump pads). */
582511be 3576 if (!non_stop)
5c9eb2f2 3577 target_stabilize_threads ();
6bf5e0ba
PA
3578 }
3579 else
3580 {
3581 /* If we just finished a step-over, then all threads had been
3582 momentarily paused. In all-stop, that's fine, we want
3583 threads stopped by now anyway. In non-stop, we need to
3584 re-resume threads that GDB wanted to be running. */
3585 if (step_over_finished)
7984d532 3586 unstop_all_lwps (1, event_child);
6bf5e0ba
PA
3587 }
3588
e88cf517
SM
3589 /* At this point, we haven't set OURSTATUS. This is where we do it. */
3590 gdb_assert (ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3591
183be222 3592 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
de0d863e 3593 {
393a6b59
PA
3594 /* If the reported event is an exit, fork, vfork, clone or exec,
3595 let GDB know. */
5a04c4cf 3596
393a6b59
PA
3597 /* Break the unreported fork/vfork/clone relationship chain. */
3598 if (is_new_child_status (event_child->waitstatus.kind ()))
5a04c4cf 3599 {
393a6b59
PA
3600 event_child->relative->relative = NULL;
3601 event_child->relative = NULL;
5a04c4cf
PA
3602 }
3603
00db26fa 3604 *ourstatus = event_child->waitstatus;
de0d863e 3605 /* Clear the event lwp's waitstatus since we handled it already. */
183be222 3606 event_child->waitstatus.set_ignore ();
de0d863e
DB
3607 }
3608 else
183be222 3609 {
e88cf517 3610 /* The LWP stopped due to a plain signal or a syscall signal. Either way,
3bfdcabb 3611 event_child->waitstatus wasn't filled in with the details, so look at
e88cf517
SM
3612 the wait status W. */
3613 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3614 {
3615 int syscall_number;
3616
3617 get_syscall_trapinfo (event_child, &syscall_number);
3618 if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
3619 ourstatus->set_syscall_entry (syscall_number);
3620 else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
3621 ourstatus->set_syscall_return (syscall_number);
3622 else
3623 gdb_assert_not_reached ("unexpected syscall state");
3624 }
3625 else if (current_thread->last_resume_kind == resume_stop
3626 && WSTOPSIG (w) == SIGSTOP)
3627 {
3628 /* A thread that has been requested to stop by GDB with vCont;t,
3629 and it stopped cleanly, so report as SIG0. The use of
3630 SIGSTOP is an implementation detail. */
3631 ourstatus->set_stopped (GDB_SIGNAL_0);
3632 }
3633 else
3634 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
183be222 3635 }
5b1c542e 3636
582511be 3637 /* Now that we've selected our final event LWP, un-adjust its PC if
3e572f71
PA
3638 it was a software breakpoint, and the client doesn't know we can
3639 adjust the breakpoint ourselves. */
3640 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
c12a5089 3641 && !cs.swbreak_feature)
582511be 3642 {
d4807ea2 3643 int decr_pc = low_decr_pc_after_break ();
582511be
PA
3644
3645 if (decr_pc != 0)
3646 {
3647 struct regcache *regcache
3648 = get_thread_regcache (current_thread, 1);
bf9ae9d8 3649 low_set_pc (regcache, event_child->stop_pc + decr_pc);
582511be
PA
3650 }
3651 }
3652
d7e15655 3653 gdb_assert (step_over_bkpt == null_ptid);
d50171e4 3654
e48359ea 3655 threads_debug_printf ("ret = %s, %s",
c058728c 3656 target_pid_to_str (ptid_of (current_thread)).c_str (),
e48359ea 3657 ourstatus->to_string ().c_str ());
bd99dc85 3658
48989498 3659 return filter_exit_event (event_child, ourstatus);
bd99dc85
PA
3660}
3661
/* Get rid of any pending event in the pipe.  Draining the event pipe
   before waiting avoids spurious wakeups of the event loop caused by
   stale markers.  */
static void
async_file_flush (void)
{
  linux_event_pipe.flush ();
}
3668
/* Put something in the pipe, so the event loop wakes up.  The event
   loop polls the read end of LINUX_EVENT_PIPE; writing a marker makes
   it call back into the target to fetch pending events.  */
static void
async_file_mark (void)
{
  linux_event_pipe.mark ();
}
3675
6532e7e3
TBA
3676ptid_t
3677linux_process_target::wait (ptid_t ptid,
3678 target_waitstatus *ourstatus,
b60cea74 3679 target_wait_flags target_options)
bd99dc85 3680{
95954743 3681 ptid_t event_ptid;
bd99dc85 3682
bd99dc85
PA
3683 /* Flush the async file first. */
3684 if (target_is_async_p ())
3685 async_file_flush ();
3686
582511be
PA
3687 do
3688 {
d16f3f6c 3689 event_ptid = wait_1 (ptid, ourstatus, target_options);
582511be
PA
3690 }
3691 while ((target_options & TARGET_WNOHANG) == 0
183be222 3692 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3693
3694 /* If at least one stop was reported, there may be more. A single
3695 SIGCHLD can signal more than one child stop. */
3696 if (target_is_async_p ()
3697 && (target_options & TARGET_WNOHANG) != 0
d7e15655 3698 && event_ptid != null_ptid)
bd99dc85
PA
3699 async_file_mark ();
3700
3701 return event_ptid;
da6d8c04
DJ
3702}
3703
/* Send a signal to an LWP.  Uses the tkill syscall so the signal is
   delivered to the specific thread LWPID rather than to an arbitrary
   thread of its process.  Returns the syscall's return value.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
  int ret;

  errno = 0;
  ret = syscall (__NR_tkill, lwpid, signo);
  if (errno == ENOSYS)
    {
      /* If tkill fails, then we are not using nptl threads, a
	 configuration we no longer support.  */
      perror_with_name (("tkill"));
    }
  return ret;
}
3721
964e4306
PA
/* Stop LWP by sending it a SIGSTOP (thin public wrapper around
   send_sigstop).  */
void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3727
/* Send a SIGSTOP to LWP, unless one is already pending, and record
   that a stop is expected so the corresponding wait status can be
   recognized later.  */
static void
send_sigstop (struct lwp_info *lwp)
{
  int pid;

  pid = lwpid_of (get_lwp_thread (lwp));

  /* If we already have a pending stop signal for this process, don't
     send another.  */
  if (lwp->stop_expected)
    {
      threads_debug_printf ("Have pending sigstop for lwp %d", pid);

      return;
    }

  threads_debug_printf ("Sending sigstop to lwp %d", pid);

  /* Mark the stop as expected before sending, so the SIGSTOP event is
     attributed to us rather than reported to GDB.  */
  lwp->stop_expected = 1;
  kill_lwp (pid, SIGSTOP);
}
3749
df3e4dbe
SM
3750static void
3751send_sigstop (thread_info *thread, lwp_info *except)
02fc4de7 3752{
d86d4aaf 3753 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3754
7984d532
PA
3755 /* Ignore EXCEPT. */
3756 if (lwp == except)
df3e4dbe 3757 return;
7984d532 3758
02fc4de7 3759 if (lwp->stopped)
df3e4dbe 3760 return;
02fc4de7
PA
3761
3762 send_sigstop (lwp);
7984d532
PA
3763}
3764
3765/* Increment the suspend count of an LWP, and stop it, if not stopped
3766 yet. */
df3e4dbe
SM
3767static void
3768suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
7984d532 3769{
d86d4aaf 3770 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3771
3772 /* Ignore EXCEPT. */
3773 if (lwp == except)
df3e4dbe 3774 return;
7984d532 3775
863d01bd 3776 lwp_suspended_inc (lwp);
7984d532 3777
df3e4dbe 3778 send_sigstop (thread, except);
02fc4de7
PA
3779}
3780
e8a625d1
PA
/* Mark LWP dead, with WSTAT as exit status pending to report later.
   If THREAD_EVENT is true, interpret WSTAT as a thread exit event
   instead of a process exit event.  This is meaningful for the leader
   thread, as we normally report a process-wide exit event when we see
   the leader exit, and a thread exit event when we see any other
   thread exit.  */

static void
mark_lwp_dead (struct lwp_info *lwp, int wstat, bool thread_event)
{
  /* Store the exit status for later.  */
  lwp->status_pending_p = 1;
  lwp->status_pending = wstat;

  /* Store in waitstatus as well, as there's nothing else to process
     for this event.  */
  if (WIFEXITED (wstat))
    {
      if (thread_event)
	lwp->waitstatus.set_thread_exited (WEXITSTATUS (wstat));
      else
	lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
    }
  else if (WIFSIGNALED (wstat))
    {
      /* Signal-caused deaths are always reported process-wide.  */
      gdb_assert (!thread_event);
      lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
    }
  else
    gdb_assert_not_reached ("unknown status kind");

  /* Prevent trying to stop it.  */
  lwp->stopped = 1;

  /* No further stops are expected from a dead lwp.  */
  lwp->stop_expected = 0;
}
3818
00db26fa
PA
3819/* Return true if LWP has exited already, and has a pending exit event
3820 to report to GDB. */
3821
3822static int
3823lwp_is_marked_dead (struct lwp_info *lwp)
3824{
3825 return (lwp->status_pending_p
3826 && (WIFEXITED (lwp->status_pending)
3827 || WIFSIGNALED (lwp->status_pending)));
3828}
3829
d16f3f6c
TBA
/* Wait until every LWP has reported its stop, leaving all events
   pending.  If the previously current thread died in the process,
   switch away from it so later commands don't apply to the wrong
   thread.  */
void
linux_process_target::wait_for_sigstop ()
{
  struct thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  scoped_restore_current_thread restore_thread;

  threads_debug_printf ("pulling events");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
  gdb_assert (ret == -1);

  if (saved_thread == NULL || mythread_alive (saved_tid))
    return;
  else
    {
      threads_debug_printf ("Previously current thread died.");

      /* We can't change the current inferior behind GDB's back,
	 otherwise, a subsequent command may apply to the wrong
	 process.  */
      restore_thread.dont_restore ();
      switch_to_thread (nullptr);
    }
}
3867
13e567af
TBA
/* Return true if THREAD's LWP is stopped in the middle of a fast
   tracepoint collection in its jump pad at a spot GDB needs to see
   (a GDB breakpoint, a watchpoint stop, or a single-step request),
   i.e. it must be moved out of the pad before reporting the stop.
   THREAD's LWP must be stopped and not suspended.  */
bool
linux_process_target::stuck_in_jump_pad (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  if (lwp->suspended != 0)
    {
      internal_error ("LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* Allow debugging the jump pad, gdb_collect, etc..  */
  return (supports_fast_tracepoints ()
	  && agent_loaded_p ()
	  && (gdb_breakpoint_here (lwp->stop_pc)
	      || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
	      || thread->last_resume_kind == resume_step)
	  && (linux_fast_tracepoint_collecting (lwp, NULL)
	      != fast_tpoint_collect_result::not_collecting));
}
3889
d16f3f6c
TBA
3890void
3891linux_process_target::move_out_of_jump_pad (thread_info *thread)
fa593d66 3892{
d86d4aaf 3893 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3894 int *wstat;
3895
863d01bd
PA
3896 if (lwp->suspended != 0)
3897 {
f34652de 3898 internal_error ("LWP %ld is suspended, suspended=%d\n",
863d01bd
PA
3899 lwpid_of (thread), lwp->suspended);
3900 }
fa593d66
PA
3901 gdb_assert (lwp->stopped);
3902
f0ce0d3a 3903 /* For gdb_breakpoint_here. */
24583e45
TBA
3904 scoped_restore_current_thread restore_thread;
3905 switch_to_thread (thread);
f0ce0d3a 3906
fa593d66
PA
3907 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3908
3909 /* Allow debugging the jump pad, gdb_collect, etc. */
3910 if (!gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3911 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
3912 && thread->last_resume_kind != resume_step
3913 && maybe_move_out_of_jump_pad (lwp, wstat))
3914 {
c058728c
SM
3915 threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
3916 lwpid_of (thread));
fa593d66
PA
3917
3918 if (wstat)
3919 {
3920 lwp->status_pending_p = 0;
3921 enqueue_one_deferred_signal (lwp, wstat);
3922
c058728c
SM
3923 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad",
3924 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
3925 }
3926
df95181f 3927 resume_one_lwp (lwp, 0, 0, NULL);
fa593d66
PA
3928 }
3929 else
863d01bd 3930 lwp_suspended_inc (lwp);
fa593d66
PA
3931}
3932
5a6b0a41
SM
3933static bool
3934lwp_running (thread_info *thread)
fa593d66 3935{
d86d4aaf 3936 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3937
00db26fa 3938 if (lwp_is_marked_dead (lwp))
5a6b0a41
SM
3939 return false;
3940
3941 return !lwp->stopped;
fa593d66
PA
3942}
3943
d16f3f6c
TBA
/* Stop all LWPs except EXCEPT, additionally bumping each LWP's
   suspend count if SUSPEND is nonzero.  Sets the global
   STOPPING_THREADS state for the duration, and returns once every LWP
   has reported its stop (events are left pending).  Must not be
   called recursively.  */
void
linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
{
  /* Should not be called recursively.  */
  gdb_assert (stopping_threads == NOT_STOPPING_THREADS);

  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  threads_debug_printf
    ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
     (except != NULL
      ? target_pid_to_str (ptid_of (get_lwp_thread (except))).c_str ()
      : "none"));

  stopping_threads = (suspend
		      ? STOPPING_AND_SUSPENDING_THREADS
		      : STOPPING_THREADS);

  if (suspend)
    for_each_thread ([&] (thread_info *thread)
      {
	suspend_and_send_sigstop (thread, except);
      });
  else
    for_each_thread ([&] (thread_info *thread)
      {
	send_sigstop (thread, except);
      });

  /* Block until every LWP has reported its SIGSTOP.  */
  wait_for_sigstop ();
  stopping_threads = NOT_STOPPING_THREADS;

  threads_debug_printf ("setting stopping_threads back to !stopping");
}
3978
863d01bd
PA
3979/* Enqueue one signal in the chain of signals which need to be
3980 delivered to this process on next resume. */
3981
3982static void
3983enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3984{
013e3554
TBA
3985 lwp->pending_signals.emplace_back (signal);
3986 if (info == nullptr)
3987 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
863d01bd 3988 else
013e3554 3989 lwp->pending_signals.back ().info = *info;
863d01bd
PA
3990}
3991
df95181f
TBA
/* Implement software single-stepping for LWP: compute the possible
   successor PCs of the current instruction and plant a single-step
   breakpoint at each one.  */
void
linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct regcache *regcache = get_thread_regcache (thread, 1);

  scoped_restore_current_thread restore_thread;

  /* Breakpoint insertion works on the current thread.  */
  switch_to_thread (thread);
  std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);

  for (CORE_ADDR pc : next_pcs)
    set_single_step_breakpoint (pc, current_ptid);
}
4006
df95181f
TBA
4007int
4008linux_process_target::single_step (lwp_info* lwp)
7fe5e27e
AT
4009{
4010 int step = 0;
4011
b31cdfa6 4012 if (supports_hardware_single_step ())
7fe5e27e
AT
4013 {
4014 step = 1;
4015 }
7582c77c 4016 else if (supports_software_single_step ())
7fe5e27e
AT
4017 {
4018 install_software_single_step_breakpoints (lwp);
4019 step = 0;
4020 }
4021 else
c058728c 4022 threads_debug_printf ("stepping is not implemented on this target");
7fe5e27e
AT
4023
4024 return step;
4025}
4026
35ac8b3e 4027/* The signal can be delivered to the inferior if we are not trying to
5b061e98
YQ
4028 finish a fast tracepoint collect. Since signal can be delivered in
4029 the step-over, the program may go to signal handler and trap again
4030 after return from the signal handler. We can live with the spurious
4031 double traps. */
35ac8b3e
YQ
4032
4033static int
4034lwp_signal_can_be_delivered (struct lwp_info *lwp)
4035{
229d26fc
SM
4036 return (lwp->collecting_fast_tracepoint
4037 == fast_tpoint_collect_result::not_collecting);
35ac8b3e
YQ
4038}
4039
df95181f
TBA
/* Resume execution of LWP.  If STEP is nonzero, single-step it; if
   SIGNAL is nonzero, deliver that signal (with optional siginfo
   details in INFO).  Does nothing if the LWP is not stopped, or if it
   has a pending status to report.  Errors out via perror_with_name
   (which throws) if the ptrace resume itself fails — see
   resume_one_lwp for the caller that handles that.  */
void
linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
					    int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int ptrace_request;
  struct process_info *proc = get_thread_process (thread);

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || !lwp->pending_signals.empty ()
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Not resuming lwp %ld (%s, stop %s); has pending status",
	 lwpid_of (thread), step ? "step" : "continue",
	 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      threads_debug_printf ("  pending reinsert at 0x%s",
			    paddress (lwp->bp_reinsert));

      if (supports_hardware_single_step ())
	{
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    threads_debug_printf
      ("lwp %ld wants to get out of fast tracepoint jump pad "
       "(exit-jump-pad-bkpt)", lwpid_of (thread));

  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      threads_debug_printf
	("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
	 lwpid_of (thread));

      if (supports_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error ("moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      threads_debug_printf
	("lwp %ld has a while-stepping action -> forcing step.",
	 lwpid_of (thread));

      step = single_step (lwp);
    }

  if (proc->tdesc != NULL && low_supports_breakpoints ())
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      /* Record the PC we're resuming from, so a later stop can tell
	 whether the thread moved.  */
      lwp->stop_pc = low_get_pc (regcache);

      threads_debug_printf ("  %s from pc 0x%lx", step ? "step" : "continue",
			    (long) lwp->stop_pc);
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
    {
      const pending_signal &p_sig = lwp->pending_signals.front ();

      signal = p_sig.signal;
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals.pop_front ();
    }

  threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
			lwpid_of (thread), step ? "step" : "continue", signal,
			lwp->stop_expected ? "expected" : "not expected");

  /* Give the architecture-specific code a chance to act before the
     LWP starts running again.  */
  low_prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  if (errno)
    {
      int saved_errno = errno;

      threads_debug_printf ("ptrace errno = %d (%s)",
			    saved_errno, strerror (saved_errno));

      /* Restore errno: the debug output above may have clobbered
	 it.  */
      errno = saved_errno;
      perror_with_name ("resuming thread");
    }

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
4235
d7599cc0
TBA
/* Default implementation of the architecture hook that runs just
   before an LWP is resumed; architecture subclasses override it.  */
void
linux_process_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* Nop.  */
}
4241
23f238d3
PA
/* Called when we try to resume a stopped LWP and that errors out.  If
   the LWP is no longer in ptrace-stopped state (meaning it's zombie,
   or about to become), discard the error, clear any pending status
   the LWP may have, and return true (we'll collect the exit status
   soon enough).  Otherwise, return false.  */

static int
check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
{
  struct thread_info *thread = get_lwp_thread (lp);

  /* If we get an error after resuming the LWP successfully, we'd
     confuse !T state for the LWP being gone.  */
  gdb_assert (lp->stopped);

  /* We can't just check whether the LWP is in 'Z (Zombie)' state,
     because even if ptrace failed with ESRCH, the tracee may be "not
     yet fully dead", but already refusing ptrace requests.  In that
     case the tracee has 'R (Running)' state for a little bit
     (observed in Linux 3.18).  See also the note on ESRCH in the
     ptrace(2) man page.  Instead, check whether the LWP has any state
     other than ptrace-stopped.  */

  /* Don't assume anything if /proc/PID/status can't be read.  */
  if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
    {
      lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
      lp->status_pending_p = 0;
      return 1;
    }
  return 0;
}
4274
df95181f
TBA
/* Like resume_one_lwp_throw, but swallows the error if the LWP turns
   out to be gone from ptrace's point of view (e.g. it is becoming a
   zombie); in that case the LWP is marked running so its exit event
   can still be collected.  Any other error is re-thrown.  */
void
linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
				      siginfo_t *info)
{
  try
    {
      resume_one_lwp_throw (lwp, step, signal, info);
    }
  catch (const gdb_exception_error &ex)
    {
      if (check_ptrace_stopped_lwp_gone (lwp))
	{
	  /* This could be because we tried to resume an LWP after its
	     leader exited.  Mark it as resumed, so we can collect an
	     exit event from it.  */
	  lwp->stopped = 0;
	  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
      else
	throw;
    }
}
4297
5fdda392
SM
/* This function is called once per thread via for_each_thread.
   We look up which resume request applies to THREAD and mark it with a
   pointer to the appropriate resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */

static void
linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  for (int ndx = 0; ndx < n; ndx++)
    {
      ptid_t ptid = resume[ndx].thread;
      if (ptid == minus_one_ptid
	  || ptid == thread->id
	  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
	     of PID'.  */
	  || (ptid.pid () == pid_of (thread)
	      && (ptid.is_pid ()
		  || ptid.lwp () == -1)))
	{
	  /* Ignore redundant stop requests for threads GDB already
	     asked to stop.  */
	  if (resume[ndx].kind == resume_stop
	      && thread->last_resume_kind == resume_stop)
	    {
	      threads_debug_printf
		("already %s LWP %ld at GDB's request",
		 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
		  ? "stopped" : "stopping"),
		 lwpid_of (thread));

	      continue;
	    }

	  /* Ignore (wildcard) resume requests for already-resumed
	     threads.  */
	  if (resume[ndx].kind != resume_stop
	      && thread->last_resume_kind != resume_stop)
	    {
	      threads_debug_printf
		("already %s LWP %ld at GDB's request",
		 (thread->last_resume_kind == resume_step
		  ? "stepping" : "continuing"),
		 lwpid_of (thread));
	      continue;
	    }

	  /* Don't let wildcard resumes resume fork/vfork/clone
	     children that GDB does not yet know are new children.  */
	  if (lwp->relative != NULL)
	    {
	      struct lwp_info *rel = lwp->relative;

	      if (rel->status_pending_p
		  && is_new_child_status (rel->waitstatus.kind ()))
		{
		  threads_debug_printf
		    ("not resuming LWP %ld: has queued stop reply",
		     lwpid_of (thread));
		  continue;
		}
	    }

	  /* If the thread has a pending event that has already been
	     reported to GDBserver core, but GDB has not pulled the
	     event out of the vStopped queue yet, likewise, ignore the
	     (wildcard) resume request.  */
	  if (in_queued_stop_replies (thread->id))
	    {
	      threads_debug_printf
		("not resuming LWP %ld: has queued stop reply",
		 lwpid_of (thread));
	      continue;
	    }

	  /* This request applies; record it on the LWP.  */
	  lwp->resume = &resume[ndx];
	  thread->last_resume_kind = lwp->resume->kind;

	  lwp->step_range_start = lwp->resume->step_range_start;
	  lwp->step_range_end = lwp->resume->step_range_end;

	  /* If we had a deferred signal to report, dequeue one now.
	     This can happen if LWP gets more than one signal while
	     trying to get out of a jump pad.  */
	  if (lwp->stopped
	      && !lwp->status_pending_p
	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
	    {
	      lwp->status_pending_p = 1;

	      threads_debug_printf
		("Dequeueing deferred signal %d for LWP %ld, "
		 "leaving status pending.",
		 WSTOPSIG (lwp->status_pending),
		 lwpid_of (thread));
	    }

	  return;
	}
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;
}
4404
df95181f
TBA
4405bool
4406linux_process_target::resume_status_pending (thread_info *thread)
5544ad89 4407{
d86d4aaf 4408 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4409
bd99dc85
PA
4410 /* LWPs which will not be resumed are not interesting, because
4411 we might not wait for them next time through linux_wait. */
2bd7c093 4412 if (lwp->resume == NULL)
25c28b4d 4413 return false;
64386c31 4414
df95181f 4415 return thread_still_has_status_pending (thread);
d50171e4
PA
4416}
4417
df95181f
TBA
/* Return true if THREAD is stopped at a breakpoint (or fast
   tracepoint jump) that we need to step over before it can be
   resumed; used as a find_thread predicate.  */
bool
linux_process_target::thread_needs_step_over (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
			    lwpid_of (thread));
      return false;
    }

  if (thread->last_resume_kind == resume_stop)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, should remain stopped",
	 lwpid_of (thread));
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
			    lwpid_of (thread));
      return false;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending status.",
	 lwpid_of (thread));
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Cancelling, PC was changed. "
	 "Old stop_pc was 0x%s, PC is now 0x%s", lwpid_of (thread),
	 paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (supports_software_single_step ()
      && !lwp->pending_signals.empty ()
      && lwp_signal_can_be_delivered (lwp))
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending signals.",
	 lwpid_of (thread));

      return false;
    }

  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
				" GDB breakpoint at 0x%s; skipping step over",
				lwpid_of (thread), paddress (pc));

	  return false;
	}
      else
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, "
				"found breakpoint at 0x%s",
				lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  return true;
	}
    }

  threads_debug_printf
    ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
     lwpid_of (thread), paddress (pc));

  return false;
}
4533
d16f3f6c
TBA
4534void
4535linux_process_target::start_step_over (lwp_info *lwp)
d50171e4 4536{
d86d4aaf 4537 struct thread_info *thread = get_lwp_thread (lwp);
d50171e4 4538 CORE_ADDR pc;
d50171e4 4539
c058728c
SM
4540 threads_debug_printf ("Starting step-over on LWP %ld. Stopping all threads",
4541 lwpid_of (thread));
d50171e4 4542
7984d532 4543 stop_all_lwps (1, lwp);
863d01bd
PA
4544
4545 if (lwp->suspended != 0)
4546 {
f34652de 4547 internal_error ("LWP %ld suspended=%d\n", lwpid_of (thread),
863d01bd
PA
4548 lwp->suspended);
4549 }
d50171e4 4550
c058728c 4551 threads_debug_printf ("Done stopping all threads for step-over.");
d50171e4
PA
4552
4553 /* Note, we should always reach here with an already adjusted PC,
4554 either by GDB (if we're resuming due to GDB's request), or by our
4555 caller, if we just finished handling an internal breakpoint GDB
4556 shouldn't care about. */
4557 pc = get_pc (lwp);
4558
24583e45
TBA
4559 bool step = false;
4560 {
4561 scoped_restore_current_thread restore_thread;
4562 switch_to_thread (thread);
d50171e4 4563
24583e45
TBA
4564 lwp->bp_reinsert = pc;
4565 uninsert_breakpoints_at (pc);
4566 uninsert_fast_tracepoint_jumps_at (pc);
d50171e4 4567
24583e45
TBA
4568 step = single_step (lwp);
4569 }
d50171e4 4570
df95181f 4571 resume_one_lwp (lwp, step, 0, NULL);
d50171e4
PA
4572
4573 /* Require next event from this LWP. */
9c80ecd6 4574 step_over_bkpt = thread->id;
d50171e4
PA
4575}
4576
b31cdfa6
TBA
4577bool
4578linux_process_target::finish_step_over (lwp_info *lwp)
d50171e4
PA
4579{
4580 if (lwp->bp_reinsert != 0)
4581 {
24583e45 4582 scoped_restore_current_thread restore_thread;
f79b145d 4583
c058728c 4584 threads_debug_printf ("Finished step over.");
d50171e4 4585
24583e45 4586 switch_to_thread (get_lwp_thread (lwp));
f79b145d 4587
d50171e4
PA
4588 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4589 may be no breakpoint to reinsert there by now. */
4590 reinsert_breakpoints_at (lwp->bp_reinsert);
fa593d66 4591 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
d50171e4
PA
4592
4593 lwp->bp_reinsert = 0;
4594
3b9a79ef
YQ
4595 /* Delete any single-step breakpoints. No longer needed. We
4596 don't have to worry about other threads hitting this trap,
4597 and later not being able to explain it, because we were
4598 stepping over a breakpoint, and we hold all threads but
4599 LWP stopped while doing that. */
b31cdfa6 4600 if (!supports_hardware_single_step ())
f79b145d 4601 {
3b9a79ef
YQ
4602 gdb_assert (has_single_step_breakpoints (current_thread));
4603 delete_single_step_breakpoints (current_thread);
f79b145d 4604 }
d50171e4
PA
4605
4606 step_over_bkpt = null_ptid;
b31cdfa6 4607 return true;
d50171e4
PA
4608 }
4609 else
b31cdfa6 4610 return false;
d50171e4
PA
4611}
4612
d16f3f6c
TBA
4613void
4614linux_process_target::complete_ongoing_step_over ()
863d01bd 4615{
d7e15655 4616 if (step_over_bkpt != null_ptid)
863d01bd
PA
4617 {
4618 struct lwp_info *lwp;
4619 int wstat;
4620 int ret;
4621
c058728c 4622 threads_debug_printf ("detach: step over in progress, finish it first");
863d01bd
PA
4623
4624 /* Passing NULL_PTID as filter indicates we want all events to
4625 be left pending. Eventually this returns when there are no
4626 unwaited-for children left. */
d16f3f6c
TBA
4627 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4628 __WALL);
863d01bd
PA
4629 gdb_assert (ret == -1);
4630
4631 lwp = find_lwp_pid (step_over_bkpt);
4632 if (lwp != NULL)
7e9cf1fe
PA
4633 {
4634 finish_step_over (lwp);
4635
4636 /* If we got our step SIGTRAP, don't leave it pending,
4637 otherwise we would report it to GDB as a spurious
4638 SIGTRAP. */
4639 gdb_assert (lwp->status_pending_p);
4640 if (WIFSTOPPED (lwp->status_pending)
4641 && WSTOPSIG (lwp->status_pending) == SIGTRAP)
4642 {
4643 thread_info *thread = get_lwp_thread (lwp);
4644 if (thread->last_resume_kind != resume_step)
4645 {
c058728c 4646 threads_debug_printf ("detach: discard step-over SIGTRAP");
7e9cf1fe
PA
4647
4648 lwp->status_pending_p = 0;
4649 lwp->status_pending = 0;
4650 resume_one_lwp (lwp, lwp->stepping, 0, NULL);
4651 }
4652 else
c058728c
SM
4653 threads_debug_printf
4654 ("detach: resume_step, not discarding step-over SIGTRAP");
7e9cf1fe
PA
4655 }
4656 }
863d01bd
PA
4657 step_over_bkpt = null_ptid;
4658 unsuspend_all_lwps (lwp);
4659 }
4660}
4661
df95181f
TBA
4662void
4663linux_process_target::resume_one_thread (thread_info *thread,
4664 bool leave_all_stopped)
5544ad89 4665{
d86d4aaf 4666 struct lwp_info *lwp = get_thread_lwp (thread);
d50171e4 4667 int leave_pending;
5544ad89 4668
2bd7c093 4669 if (lwp->resume == NULL)
c80825ff 4670 return;
5544ad89 4671
bd99dc85 4672 if (lwp->resume->kind == resume_stop)
5544ad89 4673 {
c058728c
SM
4674 threads_debug_printf ("resume_stop request for LWP %ld",
4675 lwpid_of (thread));
bd99dc85
PA
4676
4677 if (!lwp->stopped)
4678 {
c058728c 4679 threads_debug_printf ("stopping LWP %ld", lwpid_of (thread));
bd99dc85 4680
d50171e4
PA
4681 /* Stop the thread, and wait for the event asynchronously,
4682 through the event loop. */
02fc4de7 4683 send_sigstop (lwp);
bd99dc85
PA
4684 }
4685 else
4686 {
c058728c 4687 threads_debug_printf ("already stopped LWP %ld", lwpid_of (thread));
d50171e4
PA
4688
4689 /* The LWP may have been stopped in an internal event that
4690 was not meant to be notified back to GDB (e.g., gdbserver
4691 breakpoint), so we should be reporting a stop event in
4692 this case too. */
4693
4694 /* If the thread already has a pending SIGSTOP, this is a
4695 no-op. Otherwise, something later will presumably resume
4696 the thread and this will cause it to cancel any pending
4697 operation, due to last_resume_kind == resume_stop. If
4698 the thread already has a pending status to report, we
4699 will still report it the next time we wait - see
4700 status_pending_p_callback. */
1a981360
PA
4701
4702 /* If we already have a pending signal to report, then
4703 there's no need to queue a SIGSTOP, as this means we're
4704 midway through moving the LWP out of the jumppad, and we
4705 will report the pending signal as soon as that is
4706 finished. */
013e3554 4707 if (lwp->pending_signals_to_report.empty ())
1a981360 4708 send_sigstop (lwp);
bd99dc85 4709 }
32ca6d61 4710
bd99dc85
PA
4711 /* For stop requests, we're done. */
4712 lwp->resume = NULL;
183be222 4713 thread->last_status.set_ignore ();
c80825ff 4714 return;
5544ad89
DJ
4715 }
4716
bd99dc85 4717 /* If this thread which is about to be resumed has a pending status,
863d01bd
PA
4718 then don't resume it - we can just report the pending status.
4719 Likewise if it is suspended, because e.g., another thread is
4720 stepping past a breakpoint. Make sure to queue any signals that
4721 would otherwise be sent. In all-stop mode, we do this decision
4722 based on if *any* thread has a pending status. If there's a
4723 thread that needs the step-over-breakpoint dance, then don't
4724 resume any other thread but that particular one. */
4725 leave_pending = (lwp->suspended
4726 || lwp->status_pending_p
4727 || leave_all_stopped);
5544ad89 4728
0e9a339e
YQ
4729 /* If we have a new signal, enqueue the signal. */
4730 if (lwp->resume->sig != 0)
4731 {
4732 siginfo_t info, *info_p;
4733
4734 /* If this is the same signal we were previously stopped by,
4735 make sure to queue its siginfo. */
4736 if (WIFSTOPPED (lwp->last_status)
4737 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4738 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4739 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4740 info_p = &info;
4741 else
4742 info_p = NULL;
4743
4744 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4745 }
4746
d50171e4 4747 if (!leave_pending)
bd99dc85 4748 {
c058728c 4749 threads_debug_printf ("resuming LWP %ld", lwpid_of (thread));
5544ad89 4750
9c80ecd6 4751 proceed_one_lwp (thread, NULL);
bd99dc85
PA
4752 }
4753 else
c058728c 4754 threads_debug_printf ("leaving LWP %ld stopped", lwpid_of (thread));
5544ad89 4755
183be222 4756 thread->last_status.set_ignore ();
bd99dc85 4757 lwp->resume = NULL;
0d62e5e8
DJ
4758}
4759
0e4d7e35
TBA
4760void
4761linux_process_target::resume (thread_resume *resume_info, size_t n)
0d62e5e8 4762{
d86d4aaf 4763 struct thread_info *need_step_over = NULL;
c6ecbae5 4764
c058728c 4765 THREADS_SCOPED_DEBUG_ENTER_EXIT;
87ce2a04 4766
5fdda392
SM
4767 for_each_thread ([&] (thread_info *thread)
4768 {
4769 linux_set_resume_request (thread, resume_info, n);
4770 });
5544ad89 4771
d50171e4
PA
4772 /* If there is a thread which would otherwise be resumed, which has
4773 a pending status, then don't resume any threads - we can just
4774 report the pending status. Make sure to queue any signals that
4775 would otherwise be sent. In non-stop mode, we'll apply this
4776 logic to each thread individually. We consume all pending events
4777 before considering to start a step-over (in all-stop). */
25c28b4d 4778 bool any_pending = false;
bd99dc85 4779 if (!non_stop)
df95181f
TBA
4780 any_pending = find_thread ([this] (thread_info *thread)
4781 {
4782 return resume_status_pending (thread);
4783 }) != nullptr;
d50171e4
PA
4784
4785 /* If there is a thread which would otherwise be resumed, which is
4786 stopped at a breakpoint that needs stepping over, then don't
4787 resume any threads - have it step over the breakpoint with all
4788 other threads stopped, then resume all threads again. Make sure
4789 to queue any signals that would otherwise be delivered or
4790 queued. */
bf9ae9d8 4791 if (!any_pending && low_supports_breakpoints ())
df95181f
TBA
4792 need_step_over = find_thread ([this] (thread_info *thread)
4793 {
4794 return thread_needs_step_over (thread);
4795 });
d50171e4 4796
c80825ff 4797 bool leave_all_stopped = (need_step_over != NULL || any_pending);
d50171e4 4798
c058728c
SM
4799 if (need_step_over != NULL)
4800 threads_debug_printf ("Not resuming all, need step over");
4801 else if (any_pending)
4802 threads_debug_printf ("Not resuming, all-stop and found "
4803 "an LWP with pending status");
4804 else
4805 threads_debug_printf ("Resuming, no pending status or step over needed");
d50171e4
PA
4806
4807 /* Even if we're leaving threads stopped, queue all signals we'd
4808 otherwise deliver. */
c80825ff
SM
4809 for_each_thread ([&] (thread_info *thread)
4810 {
df95181f 4811 resume_one_thread (thread, leave_all_stopped);
c80825ff 4812 });
d50171e4
PA
4813
4814 if (need_step_over)
d86d4aaf 4815 start_step_over (get_thread_lwp (need_step_over));
87ce2a04 4816
1bebeeca
PA
4817 /* We may have events that were pending that can/should be sent to
4818 the client now. Trigger a linux_wait call. */
4819 if (target_is_async_p ())
4820 async_file_mark ();
d50171e4
PA
4821}
4822
df95181f
TBA
4823void
4824linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
d50171e4 4825{
d86d4aaf 4826 struct lwp_info *lwp = get_thread_lwp (thread);
d50171e4
PA
4827 int step;
4828
7984d532 4829 if (lwp == except)
e2b44075 4830 return;
d50171e4 4831
c058728c 4832 threads_debug_printf ("lwp %ld", lwpid_of (thread));
d50171e4
PA
4833
4834 if (!lwp->stopped)
4835 {
c058728c 4836 threads_debug_printf (" LWP %ld already running", lwpid_of (thread));
e2b44075 4837 return;
d50171e4
PA
4838 }
4839
02fc4de7 4840 if (thread->last_resume_kind == resume_stop
183be222 4841 && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
d50171e4 4842 {
c058728c
SM
4843 threads_debug_printf (" client wants LWP to remain %ld stopped",
4844 lwpid_of (thread));
e2b44075 4845 return;
d50171e4
PA
4846 }
4847
4848 if (lwp->status_pending_p)
4849 {
c058728c
SM
4850 threads_debug_printf (" LWP %ld has pending status, leaving stopped",
4851 lwpid_of (thread));
e2b44075 4852 return;
d50171e4
PA
4853 }
4854
7984d532
PA
4855 gdb_assert (lwp->suspended >= 0);
4856
d50171e4
PA
4857 if (lwp->suspended)
4858 {
c058728c 4859 threads_debug_printf (" LWP %ld is suspended", lwpid_of (thread));
e2b44075 4860 return;
d50171e4
PA
4861 }
4862
1a981360 4863 if (thread->last_resume_kind == resume_stop
013e3554 4864 && lwp->pending_signals_to_report.empty ()
229d26fc
SM
4865 && (lwp->collecting_fast_tracepoint
4866 == fast_tpoint_collect_result::not_collecting))
02fc4de7
PA
4867 {
4868 /* We haven't reported this LWP as stopped yet (otherwise, the
4869 last_status.kind check above would catch it, and we wouldn't
4870 reach here. This LWP may have been momentarily paused by a
4871 stop_all_lwps call while handling for example, another LWP's
4872 step-over. In that case, the pending expected SIGSTOP signal
4873 that was queued at vCont;t handling time will have already
4874 been consumed by wait_for_sigstop, and so we need to requeue
4875 another one here. Note that if the LWP already has a SIGSTOP
4876 pending, this is a no-op. */
4877
c058728c
SM
4878 threads_debug_printf
4879 ("Client wants LWP %ld to stop. Making sure it has a SIGSTOP pending",
4880 lwpid_of (thread));
02fc4de7
PA
4881
4882 send_sigstop (lwp);
4883 }
4884
863d01bd
PA
4885 if (thread->last_resume_kind == resume_step)
4886 {
c058728c
SM
4887 threads_debug_printf (" stepping LWP %ld, client wants it stepping",
4888 lwpid_of (thread));
8901d193 4889
3b9a79ef 4890 /* If resume_step is requested by GDB, install single-step
8901d193 4891 breakpoints when the thread is about to be actually resumed if
3b9a79ef 4892 the single-step breakpoints weren't removed. */
7582c77c 4893 if (supports_software_single_step ()
3b9a79ef 4894 && !has_single_step_breakpoints (thread))
8901d193
YQ
4895 install_software_single_step_breakpoints (lwp);
4896
4897 step = maybe_hw_step (thread);
863d01bd
PA
4898 }
4899 else if (lwp->bp_reinsert != 0)
4900 {
c058728c
SM
4901 threads_debug_printf (" stepping LWP %ld, reinsert set",
4902 lwpid_of (thread));
f79b145d
YQ
4903
4904 step = maybe_hw_step (thread);
863d01bd
PA
4905 }
4906 else
4907 step = 0;
4908
df95181f 4909 resume_one_lwp (lwp, step, 0, NULL);
7984d532
PA
4910}
4911
df95181f
TBA
4912void
4913linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4914 lwp_info *except)
7984d532 4915{
d86d4aaf 4916 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
4917
4918 if (lwp == except)
e2b44075 4919 return;
7984d532 4920
863d01bd 4921 lwp_suspended_decr (lwp);
7984d532 4922
e2b44075 4923 proceed_one_lwp (thread, except);
d50171e4
PA
4924}
4925
d16f3f6c
TBA
4926void
4927linux_process_target::proceed_all_lwps ()
d50171e4 4928{
d86d4aaf 4929 struct thread_info *need_step_over;
d50171e4
PA
4930
4931 /* If there is a thread which would otherwise be resumed, which is
4932 stopped at a breakpoint that needs stepping over, then don't
4933 resume any threads - have it step over the breakpoint with all
4934 other threads stopped, then resume all threads again. */
4935
bf9ae9d8 4936 if (low_supports_breakpoints ())
d50171e4 4937 {
df95181f
TBA
4938 need_step_over = find_thread ([this] (thread_info *thread)
4939 {
4940 return thread_needs_step_over (thread);
4941 });
d50171e4
PA
4942
4943 if (need_step_over != NULL)
4944 {
c058728c
SM
4945 threads_debug_printf ("found thread %ld needing a step-over",
4946 lwpid_of (need_step_over));
d50171e4 4947
d86d4aaf 4948 start_step_over (get_thread_lwp (need_step_over));
d50171e4
PA
4949 return;
4950 }
4951 }
5544ad89 4952
c058728c 4953 threads_debug_printf ("Proceeding, no step-over needed");
d50171e4 4954
df95181f 4955 for_each_thread ([this] (thread_info *thread)
e2b44075
SM
4956 {
4957 proceed_one_lwp (thread, NULL);
4958 });
d50171e4
PA
4959}
4960
d16f3f6c
TBA
4961void
4962linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
d50171e4 4963{
c058728c
SM
4964 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4965
4966 if (except)
4967 threads_debug_printf ("except=(LWP %ld)",
4968 lwpid_of (get_lwp_thread (except)));
4969 else
4970 threads_debug_printf ("except=nullptr");
5544ad89 4971
7984d532 4972 if (unsuspend)
e2b44075
SM
4973 for_each_thread ([&] (thread_info *thread)
4974 {
4975 unsuspend_and_proceed_one_lwp (thread, except);
4976 });
7984d532 4977 else
e2b44075
SM
4978 for_each_thread ([&] (thread_info *thread)
4979 {
4980 proceed_one_lwp (thread, except);
4981 });
0d62e5e8
DJ
4982}
4983
58caa3dc
DJ
4984
4985#ifdef HAVE_LINUX_REGSETS
4986
1faeff08
MR
4987#define use_linux_regsets 1
4988
030031ee
PA
4989/* Returns true if REGSET has been disabled. */
4990
4991static int
4992regset_disabled (struct regsets_info *info, struct regset_info *regset)
4993{
4994 return (info->disabled_regsets != NULL
4995 && info->disabled_regsets[regset - info->regsets]);
4996}
4997
4998/* Disable REGSET. */
4999
5000static void
5001disable_regset (struct regsets_info *info, struct regset_info *regset)
5002{
5003 int dr_offset;
5004
5005 dr_offset = regset - info->regsets;
5006 if (info->disabled_regsets == NULL)
224c3ddb 5007 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
030031ee
PA
5008 info->disabled_regsets[dr_offset] = 1;
5009}
5010
58caa3dc 5011static int
3aee8918
PA
5012regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5013 struct regcache *regcache)
58caa3dc
DJ
5014{
5015 struct regset_info *regset;
e9d25b98 5016 int saw_general_regs = 0;
95954743 5017 int pid;
1570b33e 5018 struct iovec iov;
58caa3dc 5019
0bfdf32f 5020 pid = lwpid_of (current_thread);
28eef672 5021 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 5022 {
1570b33e
L
5023 void *buf, *data;
5024 int nt_type, res;
58caa3dc 5025
030031ee 5026 if (regset->size == 0 || regset_disabled (regsets_info, regset))
28eef672 5027 continue;
58caa3dc 5028
bca929d3 5029 buf = xmalloc (regset->size);
1570b33e
L
5030
5031 nt_type = regset->nt_type;
5032 if (nt_type)
5033 {
5034 iov.iov_base = buf;
5035 iov.iov_len = regset->size;
5036 data = (void *) &iov;
5037 }
5038 else
5039 data = buf;
5040
dfb64f85 5041#ifndef __sparc__
f15f9948 5042 res = ptrace (regset->get_request, pid,
b8e1b30e 5043 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5044#else
1570b33e 5045 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 5046#endif
58caa3dc
DJ
5047 if (res < 0)
5048 {
1ef53e6b
AH
5049 if (errno == EIO
5050 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
58caa3dc 5051 {
1ef53e6b
AH
5052 /* If we get EIO on a regset, or an EINVAL and the regset is
5053 optional, do not try it again for this process mode. */
030031ee 5054 disable_regset (regsets_info, regset);
58caa3dc 5055 }
e5a9158d
AA
5056 else if (errno == ENODATA)
5057 {
5058 /* ENODATA may be returned if the regset is currently
5059 not "active". This can happen in normal operation,
5060 so suppress the warning in this case. */
5061 }
fcd4a73d
YQ
5062 else if (errno == ESRCH)
5063 {
5064 /* At this point, ESRCH should mean the process is
5065 already gone, in which case we simply ignore attempts
5066 to read its registers. */
5067 }
58caa3dc
DJ
5068 else
5069 {
0d62e5e8 5070 char s[256];
95954743
PA
5071 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5072 pid);
0d62e5e8 5073 perror (s);
58caa3dc
DJ
5074 }
5075 }
098dbe61
AA
5076 else
5077 {
5078 if (regset->type == GENERAL_REGS)
5079 saw_general_regs = 1;
5080 regset->store_function (regcache, buf);
5081 }
fdeb2a12 5082 free (buf);
58caa3dc 5083 }
e9d25b98
DJ
5084 if (saw_general_regs)
5085 return 0;
5086 else
5087 return 1;
58caa3dc
DJ
5088}
5089
5090static int
3aee8918
PA
5091regsets_store_inferior_registers (struct regsets_info *regsets_info,
5092 struct regcache *regcache)
58caa3dc
DJ
5093{
5094 struct regset_info *regset;
e9d25b98 5095 int saw_general_regs = 0;
95954743 5096 int pid;
1570b33e 5097 struct iovec iov;
58caa3dc 5098
0bfdf32f 5099 pid = lwpid_of (current_thread);
28eef672 5100 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 5101 {
1570b33e
L
5102 void *buf, *data;
5103 int nt_type, res;
58caa3dc 5104
feea5f36
AA
5105 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5106 || regset->fill_function == NULL)
28eef672 5107 continue;
58caa3dc 5108
bca929d3 5109 buf = xmalloc (regset->size);
545587ee
DJ
5110
5111 /* First fill the buffer with the current register set contents,
5112 in case there are any items in the kernel's regset that are
5113 not in gdbserver's regcache. */
1570b33e
L
5114
5115 nt_type = regset->nt_type;
5116 if (nt_type)
5117 {
5118 iov.iov_base = buf;
5119 iov.iov_len = regset->size;
5120 data = (void *) &iov;
5121 }
5122 else
5123 data = buf;
5124
dfb64f85 5125#ifndef __sparc__
f15f9948 5126 res = ptrace (regset->get_request, pid,
b8e1b30e 5127 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5128#else
689cc2ae 5129 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 5130#endif
545587ee
DJ
5131
5132 if (res == 0)
5133 {
5134 /* Then overlay our cached registers on that. */
442ea881 5135 regset->fill_function (regcache, buf);
545587ee
DJ
5136
5137 /* Only now do we write the register set. */
dfb64f85 5138#ifndef __sparc__
f15f9948 5139 res = ptrace (regset->set_request, pid,
b8e1b30e 5140 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5141#else
1570b33e 5142 res = ptrace (regset->set_request, pid, data, nt_type);
dfb64f85 5143#endif
545587ee
DJ
5144 }
5145
58caa3dc
DJ
5146 if (res < 0)
5147 {
1ef53e6b
AH
5148 if (errno == EIO
5149 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
58caa3dc 5150 {
1ef53e6b
AH
5151 /* If we get EIO on a regset, or an EINVAL and the regset is
5152 optional, do not try it again for this process mode. */
030031ee 5153 disable_regset (regsets_info, regset);
58caa3dc 5154 }
3221518c
UW
5155 else if (errno == ESRCH)
5156 {
1b3f6016
PA
5157 /* At this point, ESRCH should mean the process is
5158 already gone, in which case we simply ignore attempts
5159 to change its registers. See also the related
df95181f 5160 comment in resume_one_lwp. */
fdeb2a12 5161 free (buf);
3221518c
UW
5162 return 0;
5163 }
58caa3dc
DJ
5164 else
5165 {
ce3a066d 5166 perror ("Warning: ptrace(regsets_store_inferior_registers)");
58caa3dc
DJ
5167 }
5168 }
e9d25b98
DJ
5169 else if (regset->type == GENERAL_REGS)
5170 saw_general_regs = 1;
09ec9b38 5171 free (buf);
58caa3dc 5172 }
e9d25b98
DJ
5173 if (saw_general_regs)
5174 return 0;
5175 else
5176 return 1;
58caa3dc
DJ
5177}
5178
1faeff08 5179#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5180
1faeff08 5181#define use_linux_regsets 0
3aee8918
PA
5182#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5183#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5184
58caa3dc 5185#endif
1faeff08
MR
5186
5187/* Return 1 if register REGNO is supported by one of the regset ptrace
5188 calls or 0 if it has to be transferred individually. */
5189
5190static int
3aee8918 5191linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5192{
5193 unsigned char mask = 1 << (regno % 8);
5194 size_t index = regno / 8;
5195
5196 return (use_linux_regsets
3aee8918
PA
5197 && (regs_info->regset_bitmap == NULL
5198 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5199}
5200
58caa3dc 5201#ifdef HAVE_LINUX_USRREGS
1faeff08 5202
5b3da067 5203static int
3aee8918 5204register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5205{
5206 int addr;
5207
3aee8918 5208 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5209 error ("Invalid register number %d.", regnum);
5210
3aee8918 5211 addr = usrregs->regmap[regnum];
1faeff08
MR
5212
5213 return addr;
5214}
5215
daca57a7
TBA
5216
5217void
5218linux_process_target::fetch_register (const usrregs_info *usrregs,
5219 regcache *regcache, int regno)
1faeff08
MR
5220{
5221 CORE_ADDR regaddr;
5222 int i, size;
5223 char *buf;
5224 int pid;
5225
3aee8918 5226 if (regno >= usrregs->num_regs)
1faeff08 5227 return;
daca57a7 5228 if (low_cannot_fetch_register (regno))
1faeff08
MR
5229 return;
5230
3aee8918 5231 regaddr = register_addr (usrregs, regno);
1faeff08
MR
5232 if (regaddr == -1)
5233 return;
5234
3aee8918
PA
5235 size = ((register_size (regcache->tdesc, regno)
5236 + sizeof (PTRACE_XFER_TYPE) - 1)
1faeff08 5237 & -sizeof (PTRACE_XFER_TYPE));
224c3ddb 5238 buf = (char *) alloca (size);
1faeff08 5239
0bfdf32f 5240 pid = lwpid_of (current_thread);
1faeff08
MR
5241 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5242 {
5243 errno = 0;
5244 *(PTRACE_XFER_TYPE *) (buf + i) =
5245 ptrace (PTRACE_PEEKUSER, pid,
5246 /* Coerce to a uintptr_t first to avoid potential gcc warning
5247 of coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 5248 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
1faeff08
MR
5249 regaddr += sizeof (PTRACE_XFER_TYPE);
5250 if (errno != 0)
9a70f35c
YQ
5251 {
5252 /* Mark register REGNO unavailable. */
5253 supply_register (regcache, regno, NULL);
5254 return;
5255 }
1faeff08
MR
5256 }
5257
b35db733 5258 low_supply_ptrace_register (regcache, regno, buf);
1faeff08
MR
5259}
5260
daca57a7
TBA
5261void
5262linux_process_target::store_register (const usrregs_info *usrregs,
5263 regcache *regcache, int regno)
1faeff08
MR
5264{
5265 CORE_ADDR regaddr;
5266 int i, size;
5267 char *buf;
5268 int pid;
5269
3aee8918 5270 if (regno >= usrregs->num_regs)
1faeff08 5271 return;
daca57a7 5272 if (low_cannot_store_register (regno))
1faeff08
MR
5273 return;
5274
3aee8918 5275 regaddr = register_addr (usrregs, regno);
1faeff08
MR
5276 if (regaddr == -1)
5277 return;
5278
3aee8918
PA
5279 size = ((register_size (regcache->tdesc, regno)
5280 + sizeof (PTRACE_XFER_TYPE) - 1)
1faeff08 5281 & -sizeof (PTRACE_XFER_TYPE));
224c3ddb 5282 buf = (char *) alloca (size);
1faeff08
MR
5283 memset (buf, 0, size);
5284
b35db733 5285 low_collect_ptrace_register (regcache, regno, buf);
1faeff08 5286
0bfdf32f 5287 pid = lwpid_of (current_thread);
1faeff08
MR
5288 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5289 {
5290 errno = 0;
5291 ptrace (PTRACE_POKEUSER, pid,
5292 /* Coerce to a uintptr_t first to avoid potential gcc warning
5293 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e
LM
5294 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5295 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
1faeff08
MR
5296 if (errno != 0)
5297 {
5298 /* At this point, ESRCH should mean the process is
5299 already gone, in which case we simply ignore attempts
5300 to change its registers. See also the related
df95181f 5301 comment in resume_one_lwp. */
1faeff08
MR
5302 if (errno == ESRCH)
5303 return;
5304
daca57a7
TBA
5305
5306 if (!low_cannot_store_register (regno))
6d91ce9a 5307 error ("writing register %d: %s", regno, safe_strerror (errno));
1faeff08
MR
5308 }
5309 regaddr += sizeof (PTRACE_XFER_TYPE);
5310 }
5311}
daca57a7 5312#endif /* HAVE_LINUX_USRREGS */
1faeff08 5313
b35db733
TBA
5314void
5315linux_process_target::low_collect_ptrace_register (regcache *regcache,
5316 int regno, char *buf)
5317{
5318 collect_register (regcache, regno, buf);
5319}
5320
5321void
5322linux_process_target::low_supply_ptrace_register (regcache *regcache,
5323 int regno, const char *buf)
5324{
5325 supply_register (regcache, regno, buf);
5326}
5327
daca57a7
TBA
5328void
5329linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5330 regcache *regcache,
5331 int regno, int all)
1faeff08 5332{
daca57a7 5333#ifdef HAVE_LINUX_USRREGS
3aee8918
PA
5334 struct usrregs_info *usr = regs_info->usrregs;
5335
1faeff08
MR
5336 if (regno == -1)
5337 {
3aee8918
PA
5338 for (regno = 0; regno < usr->num_regs; regno++)
5339 if (all || !linux_register_in_regsets (regs_info, regno))
5340 fetch_register (usr, regcache, regno);
1faeff08
MR
5341 }
5342 else
3aee8918 5343 fetch_register (usr, regcache, regno);
daca57a7 5344#endif
1faeff08
MR
5345}
5346
daca57a7
TBA
5347void
5348linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5349 regcache *regcache,
5350 int regno, int all)
1faeff08 5351{
daca57a7 5352#ifdef HAVE_LINUX_USRREGS
3aee8918
PA
5353 struct usrregs_info *usr = regs_info->usrregs;
5354
1faeff08
MR
5355 if (regno == -1)
5356 {
3aee8918
PA
5357 for (regno = 0; regno < usr->num_regs; regno++)
5358 if (all || !linux_register_in_regsets (regs_info, regno))
5359 store_register (usr, regcache, regno);
1faeff08
MR
5360 }
5361 else
3aee8918 5362 store_register (usr, regcache, regno);
58caa3dc 5363#endif
daca57a7 5364}
1faeff08 5365
a5a4d4cd
TBA
5366void
5367linux_process_target::fetch_registers (regcache *regcache, int regno)
1faeff08
MR
5368{
5369 int use_regsets;
5370 int all = 0;
aa8d21c9 5371 const regs_info *regs_info = get_regs_info ();
1faeff08
MR
5372
5373 if (regno == -1)
5374 {
bd70b1f2 5375 if (regs_info->usrregs != NULL)
3aee8918 5376 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
bd70b1f2 5377 low_fetch_register (regcache, regno);
c14dfd32 5378
3aee8918
PA
5379 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5380 if (regs_info->usrregs != NULL)
5381 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
1faeff08
MR
5382 }
5383 else
5384 {
bd70b1f2 5385 if (low_fetch_register (regcache, regno))
c14dfd32
PA
5386 return;
5387
3aee8918 5388 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5389 if (use_regsets)
3aee8918
PA
5390 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5391 regcache);
5392 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5393 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5394 }
58caa3dc
DJ
5395}
5396
a5a4d4cd
TBA
5397void
5398linux_process_target::store_registers (regcache *regcache, int regno)
58caa3dc 5399{
1faeff08
MR
5400 int use_regsets;
5401 int all = 0;
aa8d21c9 5402 const regs_info *regs_info = get_regs_info ();
1faeff08
MR
5403
5404 if (regno == -1)
5405 {
3aee8918
PA
5406 all = regsets_store_inferior_registers (regs_info->regsets_info,
5407 regcache);
5408 if (regs_info->usrregs != NULL)
5409 usr_store_inferior_registers (regs_info, regcache, regno, all);
1faeff08
MR
5410 }
5411 else
5412 {
3aee8918 5413 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5414 if (use_regsets)
3aee8918
PA
5415 all = regsets_store_inferior_registers (regs_info->regsets_info,
5416 regcache);
5417 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5418 usr_store_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5419 }
58caa3dc
DJ
5420}
5421
bd70b1f2
TBA
5422bool
5423linux_process_target::low_fetch_register (regcache *regcache, int regno)
5424{
5425 return false;
5426}
da6d8c04 5427
e2558df3 5428/* A wrapper for the read_memory target op. */
da6d8c04 5429
c3e735a6 5430static int
f450004a 5431linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
e2558df3 5432{
52405d85 5433 return the_target->read_memory (memaddr, myaddr, len);
e2558df3
TBA
5434}
5435
e2558df3 5436
421490af
PA
/* Helper for read_memory/write_memory using /proc/PID/mem.  Because
   we can use a single read/write call, this can be much more
   efficient than banging away at PTRACE_PEEKTEXT.  Also, unlike
   PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running threads.
   One and only one of READBUF and WRITEBUF is non-null.  If READBUF is
   not null, then we're reading, otherwise we're writing.  Returns 0 on
   success, an errno value on failure.  */

static int
proc_xfer_memory (CORE_ADDR memaddr, unsigned char *readbuf,
		  const gdb_byte *writebuf, int len)
{
  gdb_assert ((readbuf == nullptr) != (writebuf == nullptr));

  process_info *proc = current_process ();

  /* The /proc/PID/mem file descriptor is opened once per process and
     cached in the per-process private data.  */
  int fd = proc->priv->mem_fd;
  if (fd == -1)
    return EIO;

  /* read/write may transfer fewer bytes than requested, so loop until
     LEN is exhausted.  */
  while (len > 0)
    {
      int bytes;

      /* Use pread64/pwrite64 if available, since they save a syscall
	 and can handle 64-bit offsets even on 32-bit platforms (for
	 instance, SPARC debugging a SPARC64 application).  But only
	 use them if the offset isn't so high that when cast to off_t
	 it'd be negative, as seen on SPARC64.  pread64/pwrite64
	 outright reject such offsets.  lseek does not.  */
#ifdef HAVE_PREAD64
      if ((off_t) memaddr >= 0)
	bytes = (readbuf != nullptr
		 ? pread64 (fd, readbuf, len, memaddr)
		 : pwrite64 (fd, writebuf, len, memaddr));
      else
#endif
	{
	  bytes = -1;
	  if (lseek (fd, memaddr, SEEK_SET) != -1)
	    bytes = (readbuf != nullptr
		     ? read (fd, readbuf, len)
		     : write (fd, writebuf, len));
	}

      if (bytes < 0)
	return errno;
      else if (bytes == 0)
	{
	  /* EOF means the address space is gone, the whole process
	     exited or execed.  */
	  return EIO;
	}

      /* Account for the partial transfer and continue with the
	 remainder.  */
      memaddr += bytes;
      if (readbuf != nullptr)
	readbuf += bytes;
      else
	writebuf += bytes;
      len -= bytes;
    }

  return 0;
}
c3e735a6 5500
421490af
PA
/* Copy LEN bytes from inferior memory at MEMADDR to debugger memory at
   MYADDR.  Returns 0 on success, an errno value on failure.  */

int
linux_process_target::read_memory (CORE_ADDR memaddr,
				   unsigned char *myaddr, int len)
{
  return proc_xfer_memory (memaddr, myaddr, nullptr, len);
}
5507
93ae6fdc
PA
/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
   memory at MEMADDR.  On failure (cannot write to the inferior)
   returns the value of errno.  Always succeeds if LEN is zero.  */

int
linux_process_target::write_memory (CORE_ADDR memaddr,
				    const unsigned char *myaddr, int len)
{
  if (debug_threads)
    {
      /* Dump up to four bytes.  */
      char str[4 * 2 + 1];
      char *p = str;
      int dump = len < 4 ? len : 4;

      for (int i = 0; i < dump; i++)
	{
	  sprintf (p, "%02x", myaddr[i]);
	  p += 2;
	}
      *p = '\0';

      threads_debug_printf ("Writing %s to 0x%08lx in process %d",
			    str, (long) memaddr, current_process ()->pid);
    }

  return proc_xfer_memory (memaddr, nullptr, myaddr, len);
}
2f2893d9 5536
2a31c7aa
TBA
/* Called when symbol lookup becomes possible (i.e. GDB has connected
   and can answer qSymbol queries).  Used here to initialize
   libthread_db support, once per process.  A no-op when gdbserver is
   built without thread_db.  */

void
linux_process_target::look_up_symbols ()
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process; nothing to do.  */
  if (proc->priv->thread_db != NULL)
    return;

  thread_db_init ();
#endif
}
5549
eb497a2a
TBA
/* Interrupt the inferior, as if the user typed ^C.  */

void
linux_process_target::request_interrupt ()
{
  /* Send a SIGINT to the process group.  This acts just like the user
     typed a ^C on the controlling terminal.  Note the negated pid:
     kill(2) with a negative argument signals the whole group.  */
  int res = ::kill (-signal_pid, SIGINT);
  if (res == -1)
    warning (_("Sending SIGINT to process group of pid %ld failed: %s"),
	     signal_pid, safe_strerror (errno));
}
5560
eac215cc
TBA
/* The Linux target can always read the inferior's auxiliary vector
   (via /proc/PID/auxv).  */

bool
linux_process_target::supports_read_auxv ()
{
  return true;
}
5566
aa691b87
RM
/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
   to debugger memory starting at MYADDR.  Returns the number of bytes
   read, or -1 on error (file could not be opened or seek failed).  */

int
linux_process_target::read_auxv (int pid, CORE_ADDR offset,
				 unsigned char *myaddr, unsigned int len)
{
  char filename[PATH_MAX];
  int fd, n;

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;

  /* Seek only when needed; verify the seek landed where requested.  */
  if (offset != (CORE_ADDR) 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    n = -1;
  else
    n = read (fd, myaddr, len);

  close (fd);

  return n;
}
5593
7e0bde70
TBA
/* Insert a breakpoint or watchpoint of type TYPE at ADDR covering SIZE
   bytes.  Software breakpoints are handled generically via memory
   writes; everything else is delegated to the low target.  Returns 0
   on success, 1 if unsupported, or an errno-like failure code.  */

int
linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
				    int size, raw_breakpoint *bp)
{
  if (type == raw_bkpt_type_sw)
    return insert_memory_breakpoint (bp);
  else
    return low_insert_point (type, addr, size, bp);
}

/* Arch-specific hook for inserting non-software breakpoints.  Base
   implementation: not supported.  */

int
linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}
5611
7e0bde70
TBA
/* Remove a breakpoint or watchpoint previously inserted by
   insert_point.  Mirrors insert_point's dispatch: software breakpoints
   are removed via memory writes, the rest via the low target.  */

int
linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
				    int size, raw_breakpoint *bp)
{
  if (type == raw_bkpt_type_sw)
    return remove_memory_breakpoint (bp);
  else
    return low_remove_point (type, addr, size, bp);
}

/* Arch-specific hook for removing non-software breakpoints.  Base
   implementation: not supported.  */

int
linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}
5629
84320c4e 5630/* Implement the stopped_by_sw_breakpoint target_ops
3e572f71
PA
5631 method. */
5632
84320c4e
TBA
5633bool
5634linux_process_target::stopped_by_sw_breakpoint ()
3e572f71
PA
5635{
5636 struct lwp_info *lwp = get_thread_lwp (current_thread);
5637
5638 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5639}
5640
84320c4e 5641/* Implement the supports_stopped_by_sw_breakpoint target_ops
3e572f71
PA
5642 method. */
5643
84320c4e
TBA
5644bool
5645linux_process_target::supports_stopped_by_sw_breakpoint ()
3e572f71
PA
5646{
5647 return USE_SIGTRAP_SIGINFO;
5648}
5649
93fe88b2 5650/* Implement the stopped_by_hw_breakpoint target_ops
3e572f71
PA
5651 method. */
5652
93fe88b2
TBA
5653bool
5654linux_process_target::stopped_by_hw_breakpoint ()
3e572f71
PA
5655{
5656 struct lwp_info *lwp = get_thread_lwp (current_thread);
5657
5658 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5659}
5660
93fe88b2 5661/* Implement the supports_stopped_by_hw_breakpoint target_ops
3e572f71
PA
5662 method. */
5663
93fe88b2
TBA
5664bool
5665linux_process_target::supports_stopped_by_hw_breakpoint ()
3e572f71
PA
5666{
5667 return USE_SIGTRAP_SIGINFO;
5668}
5669
70b90b91 5670/* Implement the supports_hardware_single_step target_ops method. */
45614f15 5671
22aa6223
TBA
5672bool
5673linux_process_target::supports_hardware_single_step ()
45614f15 5674{
b31cdfa6 5675 return true;
45614f15
YQ
5676}
5677
6eeb5c55
TBA
/* True iff the current LWP's last stop was caused by a watchpoint
   hit.  */

bool
linux_process_target::stopped_by_watchpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}

/* Return the data address that triggered the watchpoint stop.  Only
   meaningful when stopped_by_watchpoint is true.  */

CORE_ADDR
linux_process_target::stopped_data_address ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stopped_data_address;
}
5693
db0dfaa0
LM
/* This is only used for targets that define PT_TEXT_ADDR,
   PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
   the target has different ways of acquiring this information, like
   loadmaps.  */

bool
linux_process_target::supports_read_offsets ()
{
#ifdef SUPPORTS_READ_OFFSETS
  return true;
#else
  return false;
#endif
}
5708
52fb6437
NS
/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Returns 1 on success with *TEXT_P and *DATA_P
   filled in, 0 if the kernel query failed.  */

int
linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#ifdef SUPPORTS_READ_OFFSETS
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  /* Clear errno first: PTRACE_PEEKUSER returns the peeked value, so
     errno is the only way to detect failure.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
#else
  gdb_assert_not_reached ("target op read_offsets not supported");
#endif
}
52fb6437 5749
6e3fd7e9
TBA
/* TLS address lookup is available only when built with thread_db.  */

bool
linux_process_target::supports_get_tls_address ()
{
#ifdef USE_THREAD_DB
  return true;
#else
  return false;
#endif
}

/* Compute the address of thread-local storage for THREAD: OFFSET bytes
   into the TLS block of LOAD_MODULE.  Result in *ADDRESS.  Returns 0
   on success, nonzero/-1 on failure.  */

int
linux_process_target::get_tls_address (thread_info *thread,
				       CORE_ADDR offset,
				       CORE_ADDR load_module,
				       CORE_ADDR *address)
{
#ifdef USE_THREAD_DB
  return thread_db_get_tls_address (thread, offset, load_module, address);
#else
  return -1;
#endif
}
5772
2d0795ee
TBA
/* OS data (qXfer:osdata) is always available on Linux.  */

bool
linux_process_target::supports_qxfer_osdata ()
{
  return true;
}

/* Serve a qXfer:osdata request.  Read-only: WRITEBUF is ignored by the
   common implementation.  */

int
linux_process_target::qxfer_osdata (const char *annex,
				    unsigned char *readbuf,
				    unsigned const char *writebuf,
				    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
5787
cb63de7c
TBA
5788void
5789linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5790 gdb_byte *inf_siginfo, int direction)
d0722149 5791{
cb63de7c 5792 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
d0722149
DE
5793
5794 /* If there was no callback, or the callback didn't do anything,
5795 then just do a straight memcpy. */
5796 if (!done)
5797 {
5798 if (direction == 1)
a5362b9a 5799 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 5800 else
a5362b9a 5801 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
5802 }
5803}
5804
cb63de7c
TBA
/* Arch-specific siginfo conversion hook.  Base implementation does
   nothing and reports so, letting the caller fall back to memcpy.  */

bool
linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
					 int direction)
{
  return false;
}

/* qXfer:siginfo is always available on Linux (via PTRACE_GETSIGINFO).  */

bool
linux_process_target::supports_qxfer_siginfo ()
{
  return true;
}
5817
/* Serve a qXfer:siginfo request: read or write (exactly one of
   READBUF/WRITEBUF is non-NULL) LEN bytes at OFFSET of the current
   LWP's siginfo.  Returns the number of bytes transferred, or -1 on
   error.  */

int
linux_process_target::qxfer_siginfo (const char *annex,
				     unsigned char *readbuf,
				     unsigned const char *writebuf,
				     CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  threads_debug_printf ("%s siginfo for lwp %d.",
			readbuf != NULL ? "Reading" : "Writing",
			pid);

  if (offset >= sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the structure.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
5867
bd99dc85
PA
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.  Everything here must be
   async-signal-safe; errno is saved and restored because the handler
   may interrupt code that inspects it.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_threads)
    {
      do
	{
	  /* Use the async signal safe debug function.  */
	  if (debug_write ("sigchld_handler\n",
			   sizeof ("sigchld_handler\n") - 1) < 0)
	    break; /* just ignore */
	} while (0);
    }

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}
5893
0dc587d4
TBA
/* The Linux target supports non-stop mode.  */

bool
linux_process_target::supports_non_stop ()
{
  return true;
}
5899
0dc587d4
TBA
/* Enable or disable async (non-blocking) mode.  Registers or removes
   the SIGCHLD event pipe with the event loop.  Returns the previous
   async state so callers can restore it.  */

bool
linux_process_target::async (bool enable)
{
  bool previous = target_is_async_p ();

  threads_debug_printf ("async (%d), previous=%d",
			enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      /* Block SIGCHLD while flipping modes so the handler never sees a
	 half-installed event pipe.  */
      gdb_sigmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (!linux_event_pipe.open_pipe ())
	    {
	      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe.event_fd (),
			    handle_target_event, NULL,
			    "linux-low");

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe.event_fd ());

	  linux_event_pipe.close_pipe ();
	}

      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
5946
0dc587d4
TBA
/* Switch non-stop mode on or off.  Returns 0 on success, -1 if async
   mode could not be toggled accordingly.  */

int
linux_process_target::start_non_stop (bool nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  target_async (nonstop);

  if (target_is_async_p () != (nonstop != false))
    return -1;

  return 0;
}
5958
652aef77
TBA
/* The Linux target supports debugging multiple processes at once.  */

bool
linux_process_target::supports_multi_process ()
{
  return true;
}
5964
89245bc0
DB
5965/* Check if fork events are supported. */
5966
9690a72a
TBA
5967bool
5968linux_process_target::supports_fork_events ()
89245bc0 5969{
a2885186 5970 return true;
89245bc0
DB
5971}
5972
5973/* Check if vfork events are supported. */
5974
9690a72a
TBA
5975bool
5976linux_process_target::supports_vfork_events ()
89245bc0 5977{
a2885186 5978 return true;
89245bc0
DB
5979}
5980
393a6b59
PA
/* Return the set of supported thread options.  */

gdb_thread_options
linux_process_target::supported_thread_options ()
{
  return GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
}
5988
94585166
DB
5989/* Check if exec events are supported. */
5990
9690a72a
TBA
5991bool
5992linux_process_target::supports_exec_events ()
94585166 5993{
a2885186 5994 return true;
94585166
DB
5995}
5996
de0d863e
DB
5997/* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5998 ptrace flags for all inferiors. This is in case the new GDB connection
5999 doesn't support the same set of events that the previous one did. */
6000
fb00dfce
TBA
6001void
6002linux_process_target::handle_new_gdb_connection ()
de0d863e 6003{
de0d863e 6004 /* Request that all the lwps reset their ptrace options. */
bbf550d5
SM
6005 for_each_thread ([] (thread_info *thread)
6006 {
6007 struct lwp_info *lwp = get_thread_lwp (thread);
6008
6009 if (!lwp->stopped)
6010 {
6011 /* Stop the lwp so we can modify its ptrace options. */
6012 lwp->must_set_ptrace_flags = 1;
6013 linux_stop_lwp (lwp);
6014 }
6015 else
6016 {
6017 /* Already stopped; go ahead and set the ptrace options. */
6018 struct process_info *proc = find_process_pid (pid_of (thread));
6019 int options = linux_low_ptrace_options (proc->attached);
6020
6021 linux_enable_event_reporting (lwpid_of (thread), options);
6022 lwp->must_set_ptrace_flags = 0;
6023 }
6024 });
de0d863e
DB
6025}
6026
55cf3021
TBA
/* Handle a 'monitor' command from GDB.  Delegated to thread_db when
   available; returns 0 (unhandled) otherwise.  */

int
linux_process_target::handle_monitor_command (char *mon)
{
#ifdef USE_THREAD_DB
  return thread_db_handle_monitor_command (mon);
#else
  return 0;
#endif
}
6036
95a45fc1
TBA
/* Return the CPU core PTID last ran on, or -1 if unknown.  */

int
linux_process_target::core_of_thread (ptid_t ptid)
{
  return linux_common_core_of_thread (ptid);
}
6042
c756403b
TBA
/* Address space randomization can be disabled on Linux (via
   personality).  */

bool
linux_process_target::supports_disable_randomization ()
{
  return true;
}
efcbbd14 6048
c0245cb9
TBA
/* The in-process agent is supported on Linux.  */

bool
linux_process_target::supports_agent ()
{
  return true;
}
6054
2526e0cd
TBA
/* Range stepping (vCont;r) is supported when the target either does
   software single-stepping or the low target implements it.  */

bool
linux_process_target::supports_range_stepping ()
{
  if (supports_software_single_step ())
    return true;

  return low_supports_range_stepping ();
}

/* Arch-specific range-stepping hook; base implementation says no.  */

bool
linux_process_target::low_supports_range_stepping ()
{
  return false;
}
6069
8247b823
TBA
/* The executable path of a process can be read from
   /proc/PID/exe.  */

bool
linux_process_target::supports_pid_to_exec_file ()
{
  return true;
}

/* Return the path of PID's executable image, or NULL on failure.  */

const char *
linux_process_target::pid_to_exec_file (int pid)
{
  return linux_proc_pid_to_exec_file (pid);
}
6081
c9b7b804
TBA
/* File I/O in the inferior's mount namespace (vFile requests with a
   filesystem pid) is supported.  */

bool
linux_process_target::supports_multifs ()
{
  return true;
}

/* open(2) FILENAME in PID's mount namespace.  */

int
linux_process_target::multifs_open (int pid, const char *filename,
				    int flags, mode_t mode)
{
  return linux_mntns_open_cloexec (pid, filename, flags, mode);
}

/* unlink(2) FILENAME in PID's mount namespace.  */

int
linux_process_target::multifs_unlink (int pid, const char *filename)
{
  return linux_mntns_unlink (pid, filename);
}

/* readlink(2) FILENAME in PID's mount namespace.  */

ssize_t
linux_process_target::multifs_readlink (int pid, const char *filename,
					char *buf, size_t bufsiz)
{
  return linux_mntns_readlink (pid, filename, buf, bufsiz);
}
6107
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
/* One segment of a no-MMU/FDPIC load map as reported by the kernel.  */
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PT_GETDSBT
# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PTRACE_GETFDPIC
# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
# endif

bool
linux_process_target::supports_read_loadmap ()
{
  return true;
}

/* Serve a qXfer:fdpic read: copy up to LEN bytes of the load map
   selected by ANNEX ("exec" or "interp"), starting at OFFSET, into
   MYADDR.  Returns the number of bytes copied, or -1 on error.  */

int
linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
				    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  /* The kernel fills DATA with a pointer into its own bookkeeping.  */
  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
723b724b 6188#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6189
bc8d3ae4
TBA
/* Syscall catchpoints are supported when the low target implements
   them.  */

bool
linux_process_target::supports_catch_syscall ()
{
  return low_supports_catch_syscall ();
}

/* Arch-specific syscall-catchpoint hook; base implementation says
   no.  */

bool
linux_process_target::low_supports_catch_syscall ()
{
  return false;
}
6201
770d8f6a
TBA
/* Read the program counter from REGCACHE.  Returns 0 when the low
   target has no notion of a PC (no breakpoint support).  */

CORE_ADDR
linux_process_target::read_pc (regcache *regcache)
{
  if (!low_supports_breakpoints ())
    return 0;

  return low_get_pc (regcache);
}

/* Write PC into REGCACHE.  Only valid when the low target supports
   breakpoints.  */

void
linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (low_supports_breakpoints ());

  low_set_pc (regcache, pc);
}
6218
68119632
TBA
/* The thread_stopped target op is implemented.  */

bool
linux_process_target::supports_thread_stopped ()
{
  return true;
}

/* Return true iff THREAD's LWP is currently stopped.  */

bool
linux_process_target::thread_stopped (thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}
6230
ef980d65
PA
6231bool
6232linux_process_target::any_resumed ()
6233{
6234 bool any_resumed;
6235
6236 auto status_pending_p_any = [&] (thread_info *thread)
6237 {
6238 return status_pending_p_callback (thread, minus_one_ptid);
6239 };
6240
6241 auto not_stopped = [&] (thread_info *thread)
6242 {
6243 return not_stopped_callback (thread, minus_one_ptid);
6244 };
6245
6246 /* Find a resumed LWP, if any. */
6247 if (find_thread (status_pending_p_any) != NULL)
6248 any_resumed = 1;
6249 else if (find_thread (not_stopped) != NULL)
6250 any_resumed = 1;
6251 else
6252 any_resumed = 0;
6253
6254 return any_resumed;
6255}
6256
8336d594
PA
/* This exposes stop-all-threads functionality to other modules.  */

void
linux_process_target::pause_all (bool freeze)
{
  stop_all_lwps (freeze, NULL);
}

/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  */

void
linux_process_target::unpause_all (bool unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}
6273
2268b414
JK
/* Extract &phdr and num_phdr in the inferior.  Return 0 on success,
   nonzero on failure (1: cannot open auxv, 2: entries missing).
   IS_ELF64 selects between the 32- and 64-bit auxv entry layouts.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  *phdr_memaddr = 0;
  *num_phdr = 0;
  /* Scan auxv entries until both AT_PHDR and AT_PHNUM are found.  */
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
6339
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  Derived from PT_PHDR: the difference between
     the runtime and link-time addresses of the program headers.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  /* Second pass: find PT_DYNAMIC and relocate its address.  */
  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
6413
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  /* Walk the dynamic section entry by entry until DT_NULL, looking for
     the MIPS rld map (direct or PC-relative) or DT_DEBUG.  */
  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  /* Union used to read the pointer out of inferior memory
	     byte-wise.  */
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      /* Relative variant: the value is an offset from the
		 entry's own address.  */
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
6515
/* Read one pointer from MEMADDR in the inferior.  PTR_SIZE is the
   inferior's pointer size.  Returns 0 on success (with *PTR set), an
   errno value on failure.  */

static int
read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
{
  int ret;

  /* Go through a union so this works on either big or little endian
     hosts, when the inferior's pointer size is smaller than the size
     of CORE_ADDR.  It is assumed the inferior's endianness is the
     same of the superior's.  */
  union
  {
    CORE_ADDR core_addr;
    unsigned int ui;
    unsigned char uc;
  } addr;

  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
  if (ret == 0)
    {
      if (ptr_size == sizeof (CORE_ADDR))
	*ptr = addr.core_addr;
      else if (ptr_size == sizeof (unsigned int))
	*ptr = addr.ui;
      else
	gdb_assert_not_reached ("unhandled pointer size");
    }
  return ret;
}
6546
974387bb
TBA
/* SVR4 library list transfers (qXfer:libraries-svr4) are supported.  */

bool
linux_process_target::supports_qxfer_libraries_svr4 ()
{
  return true;
}

/* Field offsets within the inferior's r_debug/link_map structures,
   parameterized on the inferior's word size.  */
struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset of r_debug_extended.r_next.  */
    int r_next_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };

/* Offsets for a 32-bit inferior.  */
static const link_map_offsets lmo_32bit_offsets =
  {
    0,     /* r_version offset.  */
    4,     /* r_debug.r_map offset.  */
    20,    /* r_debug_extended.r_next.  */
    0,     /* l_addr offset in link_map.  */
    4,     /* l_name offset in link_map.  */
    8,     /* l_ld offset in link_map.  */
    12,    /* l_next offset in link_map.  */
    16     /* l_prev offset in link_map.  */
  };

/* Offsets for a 64-bit inferior.  */
static const link_map_offsets lmo_64bit_offsets =
  {
    0,     /* r_version offset.  */
    8,     /* r_debug.r_map offset.  */
    40,    /* r_debug_extended.r_next.  */
    0,     /* l_addr offset in link_map.  */
    8,     /* l_name offset in link_map.  */
    16,    /* l_ld offset in link_map.  */
    24,    /* l_next offset in link_map.  */
    32     /* l_prev offset in link_map.  */
  };
6603
6604/* Get the loaded shared libraries from one namespace. */
6605
6606static void
2733d9d5
MM
6607read_link_map (std::string &document, CORE_ADDR lmid, CORE_ADDR lm_addr,
6608 CORE_ADDR lm_prev, int ptr_size, const link_map_offsets *lmo)
8d56636a
MM
6609{
6610 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6611
6612 while (lm_addr
6613 && read_one_ptr (lm_addr + lmo->l_name_offset,
6614 &l_name, ptr_size) == 0
6615 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6616 &l_addr, ptr_size) == 0
6617 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6618 &l_ld, ptr_size) == 0
6619 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6620 &l_prev, ptr_size) == 0
6621 && read_one_ptr (lm_addr + lmo->l_next_offset,
6622 &l_next, ptr_size) == 0)
6623 {
6624 unsigned char libname[PATH_MAX];
6625
6626 if (lm_prev != l_prev)
6627 {
6628 warning ("Corrupted shared library list: 0x%s != 0x%s",
6629 paddress (lm_prev), paddress (l_prev));
6630 break;
6631 }
6632
ad10f44e
MM
6633 /* Not checking for error because reading may stop before we've got
6634 PATH_MAX worth of characters. */
6635 libname[0] = '\0';
6636 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6637 libname[sizeof (libname) - 1] = '\0';
6638 if (libname[0] != '\0')
8d56636a 6639 {
ad10f44e 6640 string_appendf (document, "<library name=\"");
de75275f 6641 xml_escape_text_append (document, (char *) libname);
ad10f44e 6642 string_appendf (document, "\" lm=\"0x%s\" l_addr=\"0x%s\" "
2733d9d5 6643 "l_ld=\"0x%s\" lmid=\"0x%s\"/>",
ad10f44e 6644 paddress (lm_addr), paddress (l_addr),
2733d9d5 6645 paddress (l_ld), paddress (lmid));
8d56636a
MM
6646 }
6647
6648 lm_prev = lm_addr;
6649 lm_addr = l_next;
6650 }
6651}
6652
fb723180 6653/* Construct qXfer:libraries-svr4:read reply. */
2268b414 6654
974387bb
TBA
6655int
6656linux_process_target::qxfer_libraries_svr4 (const char *annex,
6657 unsigned char *readbuf,
6658 unsigned const char *writebuf,
6659 CORE_ADDR offset, int len)
2268b414 6660{
fe978cb0 6661 struct process_info_private *const priv = current_process ()->priv;
2268b414
JK
6662 char filename[PATH_MAX];
6663 int pid, is_elf64;
214d508e 6664 unsigned int machine;
2733d9d5 6665 CORE_ADDR lmid = 0, lm_addr = 0, lm_prev = 0;
2268b414
JK
6666
6667 if (writebuf != NULL)
6668 return -2;
6669 if (readbuf == NULL)
6670 return -1;
6671
0bfdf32f 6672 pid = lwpid_of (current_thread);
2268b414 6673 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
214d508e 6674 is_elf64 = elf_64_file_p (filename, &machine);
8d56636a
MM
6675 const link_map_offsets *lmo;
6676 int ptr_size;
6677 if (is_elf64)
6678 {
6679 lmo = &lmo_64bit_offsets;
6680 ptr_size = 8;
6681 }
6682 else
6683 {
6684 lmo = &lmo_32bit_offsets;
6685 ptr_size = 4;
6686 }
2268b414 6687
b1fbec62
GB
6688 while (annex[0] != '\0')
6689 {
6690 const char *sep;
6691 CORE_ADDR *addrp;
da4ae14a 6692 int name_len;
2268b414 6693
b1fbec62
GB
6694 sep = strchr (annex, '=');
6695 if (sep == NULL)
6696 break;
0c5bf5a9 6697
da4ae14a 6698 name_len = sep - annex;
2733d9d5
MM
6699 if (name_len == 4 && startswith (annex, "lmid"))
6700 addrp = &lmid;
6701 else if (name_len == 5 && startswith (annex, "start"))
b1fbec62 6702 addrp = &lm_addr;
da4ae14a 6703 else if (name_len == 4 && startswith (annex, "prev"))
b1fbec62
GB
6704 addrp = &lm_prev;
6705 else
6706 {
6707 annex = strchr (sep, ';');
6708 if (annex == NULL)
6709 break;
6710 annex++;
6711 continue;
6712 }
6713
6714 annex = decode_address_to_semicolon (addrp, sep + 1);
2268b414 6715 }
b1fbec62 6716
8d56636a
MM
6717 std::string document = "<library-list-svr4 version=\"1.0\"";
6718
6719 /* When the starting LM_ADDR is passed in the annex, only traverse that
2733d9d5 6720 namespace, which is assumed to be identified by LMID.
8d56636a
MM
6721
6722 Otherwise, start with R_DEBUG and traverse all namespaces we find. */
6723 if (lm_addr != 0)
ad10f44e
MM
6724 {
6725 document += ">";
2733d9d5 6726 read_link_map (document, lmid, lm_addr, lm_prev, ptr_size, lmo);
ad10f44e 6727 }
8d56636a 6728 else
2268b414 6729 {
8d56636a
MM
6730 if (lm_prev != 0)
6731 warning ("ignoring prev=0x%s without start", paddress (lm_prev));
b1fbec62 6732
2733d9d5
MM
6733 /* We could interpret LMID as 'provide only the libraries for this
6734 namespace' but GDB is currently only providing lmid, start, and
6735 prev, or nothing. */
6736 if (lmid != 0)
6737 warning ("ignoring lmid=0x%s without start", paddress (lmid));
6738
8d56636a
MM
6739 CORE_ADDR r_debug = priv->r_debug;
6740 if (r_debug == 0)
6741 r_debug = priv->r_debug = get_r_debug (pid, is_elf64);
b1fbec62
GB
6742
6743 /* We failed to find DT_DEBUG. Such situation will not change
6744 for this inferior - do not retry it. Report it to GDB as
6745 E01, see for the reasons at the GDB solib-svr4.c side. */
8d56636a 6746 if (r_debug == (CORE_ADDR) -1)
b1fbec62
GB
6747 return -1;
6748
ad10f44e
MM
6749 /* Terminate the header if we end up with an empty list. */
6750 if (r_debug == 0)
6751 document += ">";
6752
8d56636a 6753 while (r_debug != 0)
2268b414 6754 {
8d56636a
MM
6755 int r_version = 0;
6756 if (linux_read_memory (r_debug + lmo->r_version_offset,
b1fbec62 6757 (unsigned char *) &r_version,
8d56636a
MM
6758 sizeof (r_version)) != 0)
6759 {
6760 warning ("unable to read r_version from 0x%s",
6761 paddress (r_debug + lmo->r_version_offset));
6762 break;
6763 }
6764
6765 if (r_version < 1)
b1fbec62
GB
6766 {
6767 warning ("unexpected r_debug version %d", r_version);
8d56636a 6768 break;
b1fbec62 6769 }
8d56636a
MM
6770
6771 if (read_one_ptr (r_debug + lmo->r_map_offset, &lm_addr,
6772 ptr_size) != 0)
b1fbec62 6773 {
8d56636a
MM
6774 warning ("unable to read r_map from 0x%s",
6775 paddress (r_debug + lmo->r_map_offset));
6776 break;
b1fbec62 6777 }
2268b414 6778
ad10f44e
MM
6779 /* We read the entire namespace. */
6780 lm_prev = 0;
6781
6782 /* The first entry corresponds to the main executable unless the
6783 dynamic loader was loaded late by a static executable. But
6784 in such case the main executable does not have PT_DYNAMIC
6785 present and we would not have gotten here. */
6786 if (r_debug == priv->r_debug)
6787 {
6788 if (lm_addr != 0)
6789 string_appendf (document, " main-lm=\"0x%s\">",
6790 paddress (lm_addr));
6791 else
6792 document += ">";
6793
6794 lm_prev = lm_addr;
6795 if (read_one_ptr (lm_addr + lmo->l_next_offset,
6796 &lm_addr, ptr_size) != 0)
6797 {
6798 warning ("unable to read l_next from 0x%s",
6799 paddress (lm_addr + lmo->l_next_offset));
6800 break;
6801 }
6802 }
6803
2733d9d5 6804 read_link_map (document, r_debug, lm_addr, lm_prev, ptr_size, lmo);
b1fbec62 6805
8d56636a
MM
6806 if (r_version < 2)
6807 break;
b1fbec62 6808
8d56636a
MM
6809 if (read_one_ptr (r_debug + lmo->r_next_offset, &r_debug,
6810 ptr_size) != 0)
2268b414 6811 {
8d56636a
MM
6812 warning ("unable to read r_next from 0x%s",
6813 paddress (r_debug + lmo->r_next_offset));
6814 break;
d878444c 6815 }
0afae3cf 6816 }
2268b414
JK
6817 }
6818
ad10f44e 6819 document += "</library-list-svr4>";
b1fbec62 6820
f6e8a41e 6821 int document_len = document.length ();
2268b414
JK
6822 if (offset < document_len)
6823 document_len -= offset;
6824 else
6825 document_len = 0;
6826 if (len > document_len)
6827 len = document_len;
6828
f6e8a41e 6829 memcpy (readbuf, document.data () + offset, len);
2268b414
JK
6830
6831 return len;
6832}
6833
9accd112
MM
6834#ifdef HAVE_LINUX_BTRACE
6835
8263b346
TBA
6836bool
6837linux_process_target::supports_btrace ()
6838{
6839 return true;
6840}
6841
79597bdd 6842btrace_target_info *
696c0d5e 6843linux_process_target::enable_btrace (thread_info *tp,
79597bdd
TBA
6844 const btrace_config *conf)
6845{
696c0d5e 6846 return linux_enable_btrace (tp->id, conf);
79597bdd
TBA
6847}
6848
969c39fb 6849/* See to_disable_btrace target method. */
9accd112 6850
79597bdd
TBA
6851int
6852linux_process_target::disable_btrace (btrace_target_info *tinfo)
969c39fb
MM
6853{
6854 enum btrace_error err;
6855
6856 err = linux_disable_btrace (tinfo);
6857 return (err == BTRACE_ERR_NONE ? 0 : -1);
6858}
6859
bc504a31 6860/* Encode an Intel Processor Trace configuration. */
b20a6524
MM
6861
6862static void
873a185b 6863linux_low_encode_pt_config (std::string *buffer,
b20a6524
MM
6864 const struct btrace_data_pt_config *config)
6865{
873a185b 6866 *buffer += "<pt-config>\n";
b20a6524
MM
6867
6868 switch (config->cpu.vendor)
6869 {
6870 case CV_INTEL:
873a185b
TT
6871 string_xml_appendf (*buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6872 "model=\"%u\" stepping=\"%u\"/>\n",
6873 config->cpu.family, config->cpu.model,
6874 config->cpu.stepping);
b20a6524
MM
6875 break;
6876
6877 default:
6878 break;
6879 }
6880
873a185b 6881 *buffer += "</pt-config>\n";
b20a6524
MM
6882}
6883
6884/* Encode a raw buffer. */
6885
6886static void
873a185b 6887linux_low_encode_raw (std::string *buffer, const gdb_byte *data,
b20a6524
MM
6888 unsigned int size)
6889{
6890 if (size == 0)
6891 return;
6892
268a13a5 6893 /* We use hex encoding - see gdbsupport/rsp-low.h. */
873a185b 6894 *buffer += "<raw>\n";
b20a6524
MM
6895
6896 while (size-- > 0)
6897 {
6898 char elem[2];
6899
6900 elem[0] = tohex ((*data >> 4) & 0xf);
6901 elem[1] = tohex (*data++ & 0xf);
6902
8b2d5ef8 6903 buffer->append (elem, 2);
b20a6524
MM
6904 }
6905
873a185b 6906 *buffer += "</raw>\n";
b20a6524
MM
6907}
6908
969c39fb
MM
6909/* See to_read_btrace target method. */
6910
79597bdd
TBA
6911int
6912linux_process_target::read_btrace (btrace_target_info *tinfo,
873a185b 6913 std::string *buffer,
79597bdd 6914 enum btrace_read_type type)
9accd112 6915{
734b0e4b 6916 struct btrace_data btrace;
969c39fb 6917 enum btrace_error err;
9accd112 6918
969c39fb
MM
6919 err = linux_read_btrace (&btrace, tinfo, type);
6920 if (err != BTRACE_ERR_NONE)
6921 {
6922 if (err == BTRACE_ERR_OVERFLOW)
873a185b 6923 *buffer += "E.Overflow.";
969c39fb 6924 else
873a185b 6925 *buffer += "E.Generic Error.";
969c39fb 6926
8dcc53b3 6927 return -1;
969c39fb 6928 }
9accd112 6929
734b0e4b
MM
6930 switch (btrace.format)
6931 {
6932 case BTRACE_FORMAT_NONE:
873a185b 6933 *buffer += "E.No Trace.";
8dcc53b3 6934 return -1;
734b0e4b
MM
6935
6936 case BTRACE_FORMAT_BTS:
873a185b
TT
6937 *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
6938 *buffer += "<btrace version=\"1.0\">\n";
9accd112 6939
46f29a9a 6940 for (const btrace_block &block : *btrace.variant.bts.blocks)
873a185b
TT
6941 string_xml_appendf (*buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6942 paddress (block.begin), paddress (block.end));
9accd112 6943
873a185b 6944 *buffer += "</btrace>\n";
734b0e4b
MM
6945 break;
6946
b20a6524 6947 case BTRACE_FORMAT_PT:
873a185b
TT
6948 *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
6949 *buffer += "<btrace version=\"1.0\">\n";
6950 *buffer += "<pt>\n";
b20a6524
MM
6951
6952 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
9accd112 6953
b20a6524
MM
6954 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6955 btrace.variant.pt.size);
6956
873a185b
TT
6957 *buffer += "</pt>\n";
6958 *buffer += "</btrace>\n";
b20a6524
MM
6959 break;
6960
6961 default:
873a185b 6962 *buffer += "E.Unsupported Trace Format.";
8dcc53b3 6963 return -1;
734b0e4b 6964 }
969c39fb
MM
6965
6966 return 0;
9accd112 6967}
f4abbc16
MM
6968
6969/* See to_btrace_conf target method. */
6970
79597bdd
TBA
6971int
6972linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
873a185b 6973 std::string *buffer)
f4abbc16
MM
6974{
6975 const struct btrace_config *conf;
6976
873a185b
TT
6977 *buffer += "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n";
6978 *buffer += "<btrace-conf version=\"1.0\">\n";
f4abbc16
MM
6979
6980 conf = linux_btrace_conf (tinfo);
6981 if (conf != NULL)
6982 {
6983 switch (conf->format)
6984 {
6985 case BTRACE_FORMAT_NONE:
6986 break;
6987
6988 case BTRACE_FORMAT_BTS:
873a185b
TT
6989 string_xml_appendf (*buffer, "<bts");
6990 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->bts.size);
6991 string_xml_appendf (*buffer, " />\n");
f4abbc16 6992 break;
b20a6524
MM
6993
6994 case BTRACE_FORMAT_PT:
873a185b
TT
6995 string_xml_appendf (*buffer, "<pt");
6996 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->pt.size);
6997 string_xml_appendf (*buffer, "/>\n");
b20a6524 6998 break;
f4abbc16
MM
6999 }
7000 }
7001
873a185b 7002 *buffer += "</btrace-conf>\n";
f4abbc16
MM
7003 return 0;
7004}
9accd112
MM
7005#endif /* HAVE_LINUX_BTRACE */
7006
7b669087
GB
7007/* See nat/linux-nat.h. */
7008
7009ptid_t
7010current_lwp_ptid (void)
7011{
7012 return ptid_of (current_thread);
7013}
7014
07b3255c
TT
7015/* A helper function that copies NAME to DEST, replacing non-printable
7016 characters with '?'. Returns DEST as a convenience. */
7017
7018static const char *
7019replace_non_ascii (char *dest, const char *name)
7020{
7021 while (*name != '\0')
7022 {
7023 if (!ISPRINT (*name))
7024 *dest++ = '?';
7025 else
7026 *dest++ = *name;
7027 ++name;
7028 }
7029 return dest;
7030}
7031
7f63b89b
TBA
7032const char *
7033linux_process_target::thread_name (ptid_t thread)
7034{
07b3255c
TT
7035 static char dest[100];
7036
7037 const char *name = linux_proc_tid_get_name (thread);
7038 if (name == nullptr)
7039 return nullptr;
7040
7041 /* Linux limits the comm file to 16 bytes (including the trailing
7042 \0. If the program or thread name is set when using a multi-byte
7043 encoding, this might cause it to be truncated mid-character. In
7044 this situation, sending the truncated form in an XML <thread>
7045 response will cause a parse error in gdb. So, instead convert
7046 from the locale's encoding (we can't be sure this is the correct
7047 encoding, but it's as good a guess as we have) to UTF-8, but in a
7048 way that ignores any encoding errors. See PR remote/30618. */
7049 const char *cset = nl_langinfo (CODESET);
7050 iconv_t handle = iconv_open ("UTF-8//IGNORE", cset);
7051 if (handle == (iconv_t) -1)
7052 return replace_non_ascii (dest, name);
7053
7054 size_t inbytes = strlen (name);
7055 char *inbuf = const_cast<char *> (name);
7056 size_t outbytes = sizeof (dest);
7057 char *outbuf = dest;
7058 size_t result = iconv (handle, &inbuf, &inbytes, &outbuf, &outbytes);
7059
7060 if (result == (size_t) -1)
7061 {
7062 if (errno == E2BIG)
7063 outbuf = &dest[sizeof (dest) - 1];
7064 else if ((errno == EILSEQ || errno == EINVAL)
7065 && outbuf < &dest[sizeof (dest) - 2])
7066 *outbuf++ = '?';
7067 *outbuf = '\0';
7068 }
7069
7070 iconv_close (handle);
7071 return *dest == '\0' ? nullptr : dest;
7f63b89b
TBA
7072}
7073
#if USE_THREAD_DB
/* Fetch THREAD's handle via libthread_db; see target method.  */

bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  return thread_db_thread_handle (ptid, handle, handle_len);
}
#endif
7082
7b961964
SM
7083thread_info *
7084linux_process_target::thread_pending_parent (thread_info *thread)
7085{
7086 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
7087
7088 if (parent == nullptr)
7089 return nullptr;
7090
7091 return get_lwp_thread (parent);
7092}
7093
df5ad102 7094thread_info *
faf44a31
PA
7095linux_process_target::thread_pending_child (thread_info *thread,
7096 target_waitkind *kind)
df5ad102 7097{
faf44a31 7098 lwp_info *child = get_thread_lwp (thread)->pending_child (kind);
df5ad102
SM
7099
7100 if (child == nullptr)
7101 return nullptr;
7102
7103 return get_lwp_thread (child);
7104}
7105
276d4552
YQ
7106/* Default implementation of linux_target_ops method "set_pc" for
7107 32-bit pc register which is literally named "pc". */
7108
7109void
7110linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7111{
7112 uint32_t newpc = pc;
7113
7114 supply_register_by_name (regcache, "pc", &newpc);
7115}
7116
7117/* Default implementation of linux_target_ops method "get_pc" for
7118 32-bit pc register which is literally named "pc". */
7119
7120CORE_ADDR
7121linux_get_pc_32bit (struct regcache *regcache)
7122{
7123 uint32_t pc;
7124
7125 collect_register_by_name (regcache, "pc", &pc);
c058728c 7126 threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
276d4552
YQ
7127 return pc;
7128}
7129
6f69e520
YQ
7130/* Default implementation of linux_target_ops method "set_pc" for
7131 64-bit pc register which is literally named "pc". */
7132
7133void
7134linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7135{
7136 uint64_t newpc = pc;
7137
7138 supply_register_by_name (regcache, "pc", &newpc);
7139}
7140
7141/* Default implementation of linux_target_ops method "get_pc" for
7142 64-bit pc register which is literally named "pc". */
7143
7144CORE_ADDR
7145linux_get_pc_64bit (struct regcache *regcache)
7146{
7147 uint64_t pc;
7148
7149 collect_register_by_name (regcache, "pc", &pc);
c058728c 7150 threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
6f69e520
YQ
7151 return pc;
7152}
7153
0570503d 7154/* See linux-low.h. */
974c89e0 7155
0570503d 7156int
43e5fbd8 7157linux_get_auxv (int pid, int wordsize, CORE_ADDR match, CORE_ADDR *valp)
974c89e0
AH
7158{
7159 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7160 int offset = 0;
7161
7162 gdb_assert (wordsize == 4 || wordsize == 8);
7163
43e5fbd8
TJB
7164 while (the_target->read_auxv (pid, offset, data, 2 * wordsize)
7165 == 2 * wordsize)
974c89e0
AH
7166 {
7167 if (wordsize == 4)
7168 {
0570503d 7169 uint32_t *data_p = (uint32_t *) data;
974c89e0 7170 if (data_p[0] == match)
0570503d
PFC
7171 {
7172 *valp = data_p[1];
7173 return 1;
7174 }
974c89e0
AH
7175 }
7176 else
7177 {
0570503d 7178 uint64_t *data_p = (uint64_t *) data;
974c89e0 7179 if (data_p[0] == match)
0570503d
PFC
7180 {
7181 *valp = data_p[1];
7182 return 1;
7183 }
974c89e0
AH
7184 }
7185
7186 offset += 2 * wordsize;
7187 }
7188
7189 return 0;
7190}
7191
7192/* See linux-low.h. */
7193
7194CORE_ADDR
43e5fbd8 7195linux_get_hwcap (int pid, int wordsize)
974c89e0 7196{
0570503d 7197 CORE_ADDR hwcap = 0;
43e5fbd8 7198 linux_get_auxv (pid, wordsize, AT_HWCAP, &hwcap);
0570503d 7199 return hwcap;
974c89e0
AH
7200}
7201
7202/* See linux-low.h. */
7203
7204CORE_ADDR
43e5fbd8 7205linux_get_hwcap2 (int pid, int wordsize)
974c89e0 7206{
0570503d 7207 CORE_ADDR hwcap2 = 0;
43e5fbd8 7208 linux_get_auxv (pid, wordsize, AT_HWCAP2, &hwcap2);
0570503d 7209 return hwcap2;
974c89e0 7210}
6f69e520 7211
3aee8918
PA
#ifdef HAVE_LINUX_REGSETS
/* Count the entries of INFO's regset table (terminated by an entry
   with a negative size) and record the count in INFO->num_regsets.  */

void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  while (info->regsets[count].size >= 0)
    count++;

  info->num_regsets = count;
}
#endif
7222
da6d8c04
DJ
7223void
7224initialize_low (void)
7225{
bd99dc85 7226 struct sigaction sigchld_action;
dd373349 7227
bd99dc85 7228 memset (&sigchld_action, 0, sizeof (sigchld_action));
ef0478f6 7229 set_target_ops (the_linux_target);
dd373349 7230
aa7c7447 7231 linux_ptrace_init_warnings ();
1b919490 7232 linux_proc_init_warnings ();
bd99dc85
PA
7233
7234 sigchld_action.sa_handler = sigchld_handler;
7235 sigemptyset (&sigchld_action.sa_mask);
7236 sigchld_action.sa_flags = SA_RESTART;
7237 sigaction (SIGCHLD, &sigchld_action, NULL);
3aee8918
PA
7238
7239 initialize_low_arch ();
89245bc0
DB
7240
7241 linux_check_ptrace_features ();
da6d8c04 7242}