]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdbserver/linux-low.cc
Revert "gdb/x86: move reading of cs and ds state into gdb/nat directory"
[thirdparty/binutils-gdb.git] / gdbserver / linux-low.cc
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
1d506c26 2 Copyright (C) 1995-2024 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
268a13a5 22#include "gdbsupport/agent.h"
de0d863e 23#include "tdesc.h"
cdc8e9b2
JB
24#include "gdbsupport/event-loop.h"
25#include "gdbsupport/event-pipe.h"
268a13a5
TT
26#include "gdbsupport/rsp-low.h"
27#include "gdbsupport/signals-state-save-restore.h"
96d7229d
LM
28#include "nat/linux-nat.h"
29#include "nat/linux-waitpid.h"
268a13a5 30#include "gdbsupport/gdb_wait.h"
5826e159 31#include "nat/gdb_ptrace.h"
125f8a3d
GB
32#include "nat/linux-ptrace.h"
33#include "nat/linux-procfs.h"
8cc73a39 34#include "nat/linux-personality.h"
da6d8c04
DJ
35#include <signal.h>
36#include <sys/ioctl.h>
37#include <fcntl.h>
0a30fbc4 38#include <unistd.h>
fd500816 39#include <sys/syscall.h>
f9387fc3 40#include <sched.h>
07e059b5
VP
41#include <pwd.h>
42#include <sys/types.h>
43#include <dirent.h>
53ce3c39 44#include <sys/stat.h>
efcbbd14 45#include <sys/vfs.h>
1570b33e 46#include <sys/uio.h>
07b3255c
TT
47#include <langinfo.h>
48#include <iconv.h>
268a13a5 49#include "gdbsupport/filestuff.h"
07b3255c 50#include "gdbsupport/gdb-safe-ctype.h"
c144c7a0 51#include "tracepoint.h"
276d4552 52#include <inttypes.h>
268a13a5 53#include "gdbsupport/common-inferior.h"
2090129c 54#include "nat/fork-inferior.h"
268a13a5 55#include "gdbsupport/environ.h"
21987b9c 56#include "gdbsupport/gdb-sigmask.h"
268a13a5 57#include "gdbsupport/scoped_restore.h"
957f3f49
DE
58#ifndef ELFMAG0
59/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
60 then ELFMAG0 will have been defined. If it didn't get included by
61 gdb_proc_service.h then including it will likely introduce a duplicate
62 definition of elf_fpregset_t. */
63#include <elf.h>
64#endif
14d2069a 65#include "nat/linux-namespaces.h"
efcbbd14 66
fd462a61
DJ
67#ifndef O_LARGEFILE
68#define O_LARGEFILE 0
69#endif
1a981360 70
69f4c9cc
AH
71#ifndef AT_HWCAP2
72#define AT_HWCAP2 26
73#endif
74
db0dfaa0
LM
75/* Some targets did not define these ptrace constants from the start,
76 so gdbserver defines them locally here. In the future, these may
77 be removed after they are added to asm/ptrace.h. */
78#if !(defined(PT_TEXT_ADDR) \
79 || defined(PT_DATA_ADDR) \
80 || defined(PT_TEXT_END_ADDR))
81#if defined(__mcoldfire__)
82/* These are still undefined in 3.10 kernels. */
83#define PT_TEXT_ADDR 49*4
84#define PT_DATA_ADDR 50*4
85#define PT_TEXT_END_ADDR 51*4
db0dfaa0
LM
86/* These are still undefined in 3.10 kernels. */
87#elif defined(__TMS320C6X__)
88#define PT_TEXT_ADDR (0x10000*4)
89#define PT_DATA_ADDR (0x10004*4)
90#define PT_TEXT_END_ADDR (0x10008*4)
91#endif
92#endif
93
5203ae1e
TBA
94#if (defined(__UCLIBC__) \
95 && defined(HAS_NOMMU) \
96 && defined(PT_TEXT_ADDR) \
97 && defined(PT_DATA_ADDR) \
98 && defined(PT_TEXT_END_ADDR))
99#define SUPPORTS_READ_OFFSETS
100#endif
101
9accd112 102#ifdef HAVE_LINUX_BTRACE
125f8a3d 103# include "nat/linux-btrace.h"
268a13a5 104# include "gdbsupport/btrace-common.h"
9accd112
MM
105#endif
106
8365dcf5
TJB
107#ifndef HAVE_ELF32_AUXV_T
108/* Copied from glibc's elf.h. */
109typedef struct
110{
111 uint32_t a_type; /* Entry type */
112 union
113 {
114 uint32_t a_val; /* Integer value */
115 /* We use to have pointer elements added here. We cannot do that,
116 though, since it does not work when using 32-bit definitions
117 on 64-bit platforms and vice versa. */
118 } a_un;
119} Elf32_auxv_t;
120#endif
121
122#ifndef HAVE_ELF64_AUXV_T
123/* Copied from glibc's elf.h. */
124typedef struct
125{
126 uint64_t a_type; /* Entry type */
127 union
128 {
129 uint64_t a_val; /* Integer value */
130 /* We use to have pointer elements added here. We cannot do that,
131 though, since it does not work when using 32-bit definitions
132 on 64-bit platforms and vice versa. */
133 } a_un;
134} Elf64_auxv_t;
135#endif
136
/* Does the current host support PTRACE_GETREGSET?  Starts out as
   TRIBOOL_UNKNOWN; presumably probed and cached elsewhere in this
   file — confirm at the points of use.  */
enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;
8a841a35
PA
140/* Return TRUE if THREAD is the leader thread of the process. */
141
142static bool
143is_leader (thread_info *thread)
144{
145 ptid_t ptid = ptid_of (thread);
146 return ptid.pid () == ptid.lwp ();
147}
148
48989498
PA
149/* Return true if we should report thread exit events to GDB, for
150 THR. */
151
152static bool
153report_exit_events_for (thread_info *thr)
154{
155 client_state &cs = get_client_state ();
156
157 return (cs.report_thread_events
158 || (thr->thread_options & GDB_THREAD_OPTION_EXIT) != 0);
159}
160
cff068da
GB
/* LWP accessors.  */

/* See nat/linux-nat.h.  Return the ptid of LWP's thread.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}
170
/* See nat/linux-nat.h.  Attach architecture-specific per-LWP data
   INFO to LWP; ownership is taken over by the LWP.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}
179
/* See nat/linux-nat.h.  Return LWP's architecture-specific data, or
   NULL if none was set.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}
187
/* See nat/linux-nat.h.  Non-zero if LWP is currently ptrace-stopped.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}
195
/* See nat/linux-nat.h.  Return the recorded reason LWP last stopped.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}
203
0e00e962
AA
/* See nat/linux-nat.h.  Non-zero if LWP is currently single-stepping.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}
211
05044653
PA
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the singly-linked list above; NULL when empty.  */
static struct simple_pid_list *stopped_pids;
05044653
PA
228
229/* Trivial list manipulation functions to keep track of a list of new
230 stopped processes. */
231
232static void
233add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
234{
8d749320 235 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
236
237 new_pid->pid = pid;
238 new_pid->status = status;
239 new_pid->next = *listp;
240 *listp = new_pid;
241}
242
243static int
244pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
245{
246 struct simple_pid_list **p;
247
248 for (p = listp; *p != NULL; p = &(*p)->next)
249 if ((*p)->pid == pid)
250 {
251 struct simple_pid_list *next = (*p)->next;
252
253 *statusp = (*p)->status;
254 xfree (*p);
255 *p = next;
256 return 1;
257 }
258 return 0;
259}
24a09b5f 260
bde24c0a
PA
/* The possible states of the "stop all threads" machinery.  */

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
275
/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* Forward declarations for helpers defined later in this file.  */
static void unsuspend_all_lwps (struct lwp_info *except);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat,
			   bool thread_event);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
static ptid_t step_over_bkpt;
582511be 295
bf9ae9d8
TBA
/* Base implementation: reports no breakpoint support.  Presumably
   overridden by architecture backends that implement low_get_pc /
   low_set_pc — confirm in the arch-specific files.  */

bool
linux_process_target::low_supports_breakpoints ()
{
  return false;
}
d50171e4 301
bf9ae9d8
TBA
/* Base implementation: returns 0.  Only meaningful when
   low_supports_breakpoints returns true in the overriding target.  */

CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  return 0;
}
307
/* Base implementation: must be overridden by targets that claim
   breakpoint support; reaching it is an internal error.  */

void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}
0d62e5e8 313
7582c77c
TBA
/* Base implementation: must be overridden by targets that do software
   single-stepping; reaching it is an internal error.  */

std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
			  "implemented");
}
320
d4807ea2
TBA
/* Number of bytes the PC must be decremented after a software
   breakpoint trap.  Base implementation: 0 (no adjustment).  */

int
linux_process_target::low_decr_pc_after_break ()
{
  return 0;
}
326
c2d6af84
PA
327/* True if LWP is stopped in its stepping range. */
328
329static int
330lwp_in_step_range (struct lwp_info *lwp)
331{
332 CORE_ADDR pc = lwp->stop_pc;
333
334 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
335}
336
cdc8e9b2
JB
/* The event pipe registered as a waitable file in the event loop.  */
static event_pipe linux_event_pipe;

/* True if we're currently in async mode: async mode and the open
   state of the event pipe are one and the same.  */
#define target_is_async_p() (linux_event_pipe.is_open ())

static void send_sigstop (struct lwp_info *lwp);
bd99dc85 344
d0722149
DE
/* Examine HEADER.  If it carries the ELF magic bytes, store the ELF
   machine in *MACHINE and return 1 if the file class is ELFCLASS64,
   0 otherwise.  If the magic is absent (not an ELF header at all),
   set *MACHINE to EM_NONE and return -1.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  const unsigned char *ident = header->e_ident;

  if (ident[EI_MAG0] != ELFMAG0
      || ident[EI_MAG1] != ELFMAG1
      || ident[EI_MAG2] != ELFMAG2
      || ident[EI_MAG3] != ELFMAG3)
    {
      /* Not an ELF file at all.  */
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return ident[EI_CLASS] == ELFCLASS64;
}
362
363/* Return non-zero if FILE is a 64-bit ELF file,
364 zero if the file is not a 64-bit ELF file,
365 and -1 if the file is not accessible or doesn't exist. */
366
be07f1a2 367static int
214d508e 368elf_64_file_p (const char *file, unsigned int *machine)
d0722149 369{
957f3f49 370 Elf64_Ehdr header;
d0722149
DE
371 int fd;
372
373 fd = open (file, O_RDONLY);
374 if (fd < 0)
375 return -1;
376
377 if (read (fd, &header, sizeof (header)) != sizeof (header))
378 {
379 close (fd);
380 return 0;
381 }
382 close (fd);
383
214d508e 384 return elf_64_header_p (&header, machine);
d0722149
DE
385}
386
be07f1a2
PA
387/* Accepts an integer PID; Returns true if the executable PID is
388 running is a 64-bit ELF file.. */
389
390int
214d508e 391linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 392{
d8d2a3ee 393 char file[PATH_MAX];
be07f1a2
PA
394
395 sprintf (file, "/proc/%d/exe", pid);
214d508e 396 return elf_64_file_p (file, machine);
be07f1a2
PA
397}
398
fd000fb3
TBA
/* Remove LWP's thread from the thread list, release its
   architecture-private data, and destroy the lwp_info itself.  */

void
linux_process_target::delete_lwp (lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  threads_debug_printf ("deleting %ld", lwpid_of (thr));

  remove_thread (thr);

  /* Hand arch-private data back to the target for disposal.  */
  low_delete_thread (lwp->arch_private);

  delete lwp;
}
412
fd000fb3
TBA
/* Base implementation of per-thread arch data disposal.  */

void
linux_process_target::low_delete_thread (arch_lwp_info *info)
{
  /* Default implementation should be overridden if architecture-specific
     info is being used.  */
  gdb_assert (info == nullptr);
}
95954743 420
421490af
PA
421/* Open the /proc/PID/mem file for PROC. */
422
423static void
424open_proc_mem_file (process_info *proc)
425{
426 gdb_assert (proc->priv->mem_fd == -1);
427
428 char filename[64];
429 xsnprintf (filename, sizeof filename, "/proc/%d/mem", proc->pid);
430
431 proc->priv->mem_fd
432 = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
433}
434
fd000fb3 435process_info *
421490af 436linux_process_target::add_linux_process_no_mem_file (int pid, int attached)
95954743
PA
437{
438 struct process_info *proc;
439
95954743 440 proc = add_process (pid, attached);
8d749320 441 proc->priv = XCNEW (struct process_info_private);
95954743 442
fd000fb3 443 proc->priv->arch_private = low_new_process ();
421490af
PA
444 proc->priv->mem_fd = -1;
445
446 return proc;
447}
448
aa5ca48f 449
421490af
PA
/* Create and register a process_info for PID, including opening its
   /proc/PID/mem file.  ATTACHED is forwarded to add_process.  */

process_info *
linux_process_target::add_linux_process (int pid, int attached)
{
  process_info *proc = add_linux_process_no_mem_file (pid, attached);
  open_proc_mem_file (proc);
  return proc;
}
457
f551c8ef
SM
458void
459linux_process_target::remove_linux_process (process_info *proc)
460{
461 if (proc->priv->mem_fd >= 0)
462 close (proc->priv->mem_fd);
463
464 this->low_delete_process (proc->priv->arch_private);
465
466 xfree (proc->priv);
467 proc->priv = nullptr;
468
469 remove_process (proc);
470}
471
fd000fb3
TBA
/* Base implementation: no architecture-specific process data.  */

arch_process_info *
linux_process_target::low_new_process ()
{
  return nullptr;
}
477
/* Base implementation of per-process arch data disposal.  */

void
linux_process_target::low_delete_process (arch_process_info *info)
{
  /* Default implementation must be overridden if architecture-specific
     info exists.  */
  gdb_assert (info == nullptr);
}
485
/* Base implementation of the fork hook: nothing to clone.  */

void
linux_process_target::low_new_fork (process_info *parent, process_info *child)
{
  /* Nop.  */
}
491
797bcff5
TBA
/* Run the target's architecture setup (low_arch_setup) with THREAD
   temporarily selected as the current thread.  */

void
linux_process_target::arch_setup_thread (thread_info *thread)
{
  /* Previous current thread is restored when this goes out of scope.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  low_arch_setup ();
}
500
d16f3f6c
TBA
/* Handle a ptrace extended event (PTRACE_EVENT_FORK/VFORK/CLONE,
   PTRACE_EVENT_VFORK_DONE or PTRACE_EVENT_EXEC) encoded in WSTAT for
   the LWP in *ORIG_EVENT_LWP.  On exec, *ORIG_EVENT_LWP is replaced
   with the LWP of the freshly re-created post-exec inferior.  Return
   0 if the event should be reported to GDB, or 1 if it was handled
   internally and should be suppressed (a clone the client did not
   request events for).  */

int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
					    int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);

  gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (debug_threads)
	{
	  debug_printf ("HEW: Got %s event from LWP %ld, new child is %ld\n",
			(event == PTRACE_EVENT_FORK ? "fork"
			 : event == PTRACE_EVENT_VFORK ? "vfork"
			 : event == PTRACE_EVENT_CLONE ? "clone"
			 : "???"),
			ptid_of (event_thr).lwp (),
			new_pid);
	}

      /* Forked/vforked children get their own process id; a cloned
	 thread shares the parent's.  */
      ptid_t child_ptid = (event != PTRACE_EVENT_CLONE
			   ? ptid_t (new_pid, new_pid)
			   : ptid_t (ptid_of (event_thr).pid (), new_pid));

      process_info *child_proc = nullptr;

      if (event != PTRACE_EVENT_CLONE)
	{
	  /* Add the new process to the tables before we add the LWP.
	     We need to do this even if the new process will be
	     detached.  See breakpoint cloning code further below.  */
	  child_proc = add_linux_process (new_pid, 0);
	}

      lwp_info *child_lwp = add_lwp (child_ptid);
      gdb_assert (child_lwp != NULL);
      child_lwp->stopped = 1;
      if (event != PTRACE_EVENT_CLONE)
	child_lwp->must_set_ptrace_flags = 1;
      child_lwp->status_pending_p = 0;

      thread_info *child_thr = get_lwp_thread (child_lwp);

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	{
	  threads_debug_printf ("leaving child suspended");
	  child_lwp->suspended = 1;
	}

      if (event_lwp->bp_reinsert != 0
	  && supports_software_single_step ()
	  && event == PTRACE_EVENT_VFORK)
	{
	  /* If we leave single-step breakpoints there, child will
	     hit it, so uninsert single-step breakpoints from parent
	     (and child).  Once vfork child is done, reinsert
	     them back to parent.  */
	  uninsert_single_step_breakpoints (event_thr);
	}

      if (event != PTRACE_EVENT_CLONE)
	{
	  /* Clone the breakpoint lists of the parent.  We need to do
	     this even if the new process will be detached, since we
	     will need the process object and the breakpoints to
	     remove any breakpoints from memory when we detach, and
	     the client side will access registers.  */
	  gdb_assert (child_proc != NULL);

	  process_info *parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  clone_all_breakpoints (child_thr, event_thr);

	  target_desc_up tdesc = allocate_target_description ();
	  copy_target_description (tdesc.get (), parent_proc->tdesc);
	  child_proc->tdesc = tdesc.release ();

	  /* Clone arch-specific process data.  */
	  low_new_fork (parent_proc, child_proc);
	}

      /* Save fork/clone info in the parent thread.  */
      if (event == PTRACE_EVENT_FORK)
	event_lwp->waitstatus.set_forked (child_ptid);
      else if (event == PTRACE_EVENT_VFORK)
	event_lwp->waitstatus.set_vforked (child_ptid);
      else if (event == PTRACE_EVENT_CLONE
	       && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
	event_lwp->waitstatus.set_thread_cloned (child_ptid);

      if (event != PTRACE_EVENT_CLONE
	  || (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
	{
	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled, the
	     handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent's event is passed on to
	     GDB.  */
	  event_lwp->relative = child_lwp;
	  child_lwp->relative = event_lwp;
	}

      /* If the parent thread is doing step-over with single-step
	 breakpoints, the list of single-step breakpoints are cloned
	 from the parent's.  Remove them from the child process.
	 In case of vfork, we'll reinsert them back once vforked
	 child is done.  */
      if (event_lwp->bp_reinsert != 0
	  && supports_software_single_step ())
	{
	  /* The child process is forked and stopped, so it is safe
	     to access its memory without stopping all other threads
	     from other processes.  */
	  delete_single_step_breakpoints (child_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	  gdb_assert (!has_single_step_breakpoints (child_thr));
	}

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  child_lwp->stop_expected = 1;
	  child_lwp->status_pending_p = 1;
	  child_lwp->status_pending = status;
	}
      else if (event == PTRACE_EVENT_CLONE && cs.report_thread_events)
	{
	  child_lwp->waitstatus.set_thread_created ();
	  child_lwp->status_pending_p = 1;
	  child_lwp->status_pending = status;
	}

      if (event == PTRACE_EVENT_CLONE)
	{
#ifdef USE_THREAD_DB
	  thread_db_notice_clone (event_thr, child_ptid);
#endif
	}

      if (event == PTRACE_EVENT_CLONE
	  && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) == 0)
	{
	  /* The client did not ask for clone events; swallow it.  */
	  threads_debug_printf
	    ("not reporting clone event from LWP %ld, new child is %ld\n",
	     ptid_of (event_thr).lwp (),
	     new_pid);
	  return 1;
	}

      /* Leave the child stopped until GDB processes the parent
	 event.  */
      child_thr->last_resume_kind = resume_stop;
      child_thr->last_status.set_stopped (GDB_SIGNAL_0);

      /* Report the event.  */
      threads_debug_printf
	("reporting %s event from LWP %ld, new child is %ld\n",
	 (event == PTRACE_EVENT_FORK ? "fork"
	  : event == PTRACE_EVENT_VFORK ? "vfork"
	  : event == PTRACE_EVENT_CLONE ? "clone"
	  : "???"),
	 ptid_of (event_thr).lwp (),
	 new_pid);
      return 0;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.set_vfork_done ();

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
	{
	  /* Restore the single-step breakpoints removed when the
	     vfork child was spawned.  */
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      threads_debug_printf ("Got exec event from LWP %ld",
			    lwpid_of (event_thr));

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      switch_to_thread (nullptr);

      /* Create a new process/lwp/thread.  */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.set_execd
	(make_unique_xstrdup
	 (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.set_ignore ();

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (_("unknown ptrace event %d"), event);
}
781
df95181f
TBA
/* Fetch LWP's current PC through the architecture's regcache reader.
   Returns 0 when the target does not support breakpoints.  Must not
   be called while LWP's process is still starting up (asserted).  */

CORE_ADDR
linux_process_target::get_pc (lwp_info *lwp)
{
  process_info *proc = get_thread_process (get_lwp_thread (lwp));
  gdb_assert (!proc->starting_up);

  if (!low_supports_breakpoints ())
    return 0;

  /* Register reads go through the current thread; restore on exit.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  struct regcache *regcache = get_thread_regcache (current_thread, 1);
  CORE_ADDR pc = low_get_pc (regcache);

  threads_debug_printf ("pc is 0x%lx", (long) pc);

  return pc;
}
801
9eedd27d
TBA
/* Store in *SYSNO the number of the syscall LWP is stopped at, as
   read from its registers by low_get_syscall_trapinfo.  */

void
linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
{
  struct regcache *regcache;

  /* Register reads go through the current thread; restore on exit.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  regcache = get_thread_regcache (current_thread, 1);
  low_get_syscall_trapinfo (regcache, sysno);

  threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
}
815
9eedd27d
TBA
/* Base implementation of the syscall-number reader.  */

void
linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* By default, report an unknown system call number.  */
  *sysno = UNKNOWN_SYSCALL;
}
822
df95181f
TBA
/* Determine why LWP stopped (software/hardware breakpoint, watchpoint
   or single-step), record it in LWP->stop_reason and cache the
   (possibly backed-up) stop PC in LWP->stop_pc.  Return false if the
   target has no breakpoint support; return true otherwise, including
   the process-starting-up case where we merely claim to have the stop
   PC so the caller doesn't try to fetch it itself.  */

bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  process_info *proc = get_thread_process (get_lwp_thread (lwp));
  if (proc->starting_up)
    {
      /* Claim we have the stop PC so that the caller doesn't try to
	 fetch it itself.  */
      return true;
    }

  pc = get_pc (lwp);
  /* Address a software breakpoint trap would have been reported at,
     after the arch-specific PC adjustment.  */
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      threads_debug_printf
	("%s stopped by software breakpoint",
	 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    threads_debug_printf
      ("%s stopped by hardware breakpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    threads_debug_printf
      ("%s stopped by hardware watchpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    threads_debug_printf
      ("%s stopped by trace",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

  lwp->stop_pc = pc;
  return true;
}
ce3a066d 941
fd000fb3
TBA
/* Create an lwp_info for PTID, register the corresponding thread, and
   give the arch code a chance to attach per-thread data via
   low_new_thread.  Returns the new heap-allocated lwp_info; it is
   destroyed by delete_lwp.  */

lwp_info *
linux_process_target::add_lwp (ptid_t ptid)
{
  lwp_info *lwp = new lwp_info;

  lwp->thread = add_thread (ptid, lwp);

  low_new_thread (lwp);

  return lwp;
}
611cb4a5 953
fd000fb3
TBA
/* Base implementation of the new-thread hook: nothing to set up.  */

void
linux_process_target::low_new_thread (lwp_info *info)
{
  /* Nop.  */
}
959
2090129c
SDJ
/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  Runs in the child
   process, so any failure is reported via trace_start_error_with_name
   rather than normal error handling.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  /* Put the inferior in its own process group.  */
  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      /* open reuses the lowest free descriptor, i.e. fd 0 (stdin).  */
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
991
/* Start an inferior process and returns its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    /* Scoped: personality (ASLR disabling) is restored when this
       block exits, i.e. right after the fork.  */
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  /* When spawning a new process, we can't open the mem file yet.  We
     still have to nurse the process through the shell, and that execs
     a couple times.  The address space a /proc/PID/mem file is
     accessing is destroyed on exec.  */
  process_info *proc = add_linux_process_no_mem_file (pid, 0);

  ptid = ptid_t (pid, pid);
  new_lwp = add_lwp (ptid);
  /* Ptrace options are installed at the LWP's first stop.  */
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  /* PROC is now past the shell running the program we want, so we can
     open the /proc/PID/mem file.  */
  open_proc_mem_file (proc);

  return pid;
}
1034
ece66d65
JS
1035/* Implement the post_create_inferior target_ops method. */
1036
6dee9afb
TBA
1037void
1038linux_process_target::post_create_inferior ()
ece66d65
JS
1039{
1040 struct lwp_info *lwp = get_thread_lwp (current_thread);
1041
797bcff5 1042 low_arch_setup ();
ece66d65
JS
1043
1044 if (lwp->must_set_ptrace_flags)
1045 {
1046 struct process_info *proc = current_process ();
1047 int options = linux_low_ptrace_options (proc->attached);
1048
1049 linux_enable_event_reporting (lwpid_of (current_thread), options);
1050 lwp->must_set_ptrace_flags = 0;
1051 }
1052}
1053
/* Attach to the LWP specified by PTID.  Returns 0 on success, or the
   errno from PTRACE_ATTACH on failure.  */

int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      threads_debug_printf ("Attached to a stopped process");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
1134
/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      threads_debug_printf ("Found new lwp %d", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	threads_debug_printf
	  ("Cannot attach to lwp %d: thread is gone (%d: %s)",
	   lwpid, err, safe_strerror (err));
      else if (err != 0)
	{
	  /* Any other attach failure is fatal for the whole attach.  */
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  error (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      return 1;
    }
  return 0;
}
1173
500c1d85
PA
1174static void async_file_mark (void);
1175
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid);
  int err;

  /* Delay opening the /proc/PID/mem file until we've successfully
     attached.  */
  proc = add_linux_process_no_mem_file (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      /* Undo the process registration before reporting failure.  */
      this->remove_linux_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  open_proc_mem_file (proc);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid));
  gdb_assert (initial_thread != nullptr);
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  try
    {
      linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
    }
  catch (const gdb_exception_error &)
    {
      /* Make sure we do not deliver the SIGSTOP to the process.  */
      initial_thread->last_resume_kind = resume_continue;

      /* Roll back the partial attach, then re-raise the error.  */
      this->detach (proc);
      throw;
    }

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));
      gdb_assert (lwp != nullptr);

      /* Anything other than the expected SIGSTOP must be kept pending
	 so it is reported to GDB later.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
1265
95954743 1266static int
e4eb0dec 1267last_thread_of_process_p (int pid)
95954743 1268{
e4eb0dec 1269 bool seen_one = false;
95954743 1270
da4ae14a 1271 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
95954743 1272 {
e4eb0dec
SM
1273 if (!seen_one)
1274 {
1275 /* This is the first thread of this process we see. */
1276 seen_one = true;
1277 return false;
1278 }
1279 else
1280 {
1281 /* This is the second thread of this process we see. */
1282 return true;
1283 }
1284 });
da6d8c04 1285
e4eb0dec 1286 return thread == NULL;
95954743
PA
1287}
1288
/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      /* Save errno before threads_debug_printf can clobber it.  */
      int save_errno = errno;

      threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }
}
1332
/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1374
578290ec 1375/* Callback for `for_each_thread'. Kills an lwp of a given process,
da84f473 1376 except the leader. */
95954743 1377
578290ec
SM
1378static void
1379kill_one_lwp_callback (thread_info *thread, int pid)
da6d8c04 1380{
54a0b537 1381 struct lwp_info *lwp = get_thread_lwp (thread);
0d62e5e8 1382
fd500816
DJ
1383 /* We avoid killing the first thread here, because of a Linux kernel (at
1384 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1385 the children get a chance to be reaped, it will remain a zombie
1386 forever. */
95954743 1387
d86d4aaf 1388 if (lwpid_of (thread) == pid)
95954743 1389 {
c058728c
SM
1390 threads_debug_printf ("is last of process %s",
1391 target_pid_to_str (thread->id).c_str ());
578290ec 1392 return;
95954743 1393 }
fd500816 1394
e76126e8 1395 kill_wait_lwp (lwp);
da6d8c04
DJ
1396}
1397
/* Kill all LWPs of PROCESS (leader last), reap them, and mourn the
   process.  Returns 0.  */

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    threads_debug_printf ("cannot find lwp for pid: %d", pid);
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1428
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
	  || thread->last_status.sig () == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      threads_debug_printf ("lwp %s had stopped with extended "
			    "status: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  /* Respect GDB's QPassSignals / nopass settings when known.  */
  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      threads_debug_printf ("lwp %s had signal %s, "
			    "but we don't know if we should pass it. "
			    "Default to not.",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      threads_debug_printf ("lwp %s has pending signal %s: delivering it",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
1504
/* Detach from LWP: clear any pending SIGSTOP, deliver any pending
   signal via PTRACE_DETACH, and delete the lwp_info.  Handles the
   LWP having become a zombie while we were working.  */

void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      threads_debug_printf ("Sending SIGCONT to %s",
			    target_pid_to_str (ptid_of (thread)).c_str ());

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)).c_str (),
		 safe_strerror (save_errno));
	}
    }
  else
    threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
			  target_pid_to_str (ptid_of (thread)).c_str (),
			  strsignal (sig));

  delete_lwp (lwp);
}
1583
/* Detach from PROCESS and all of its LWPs, non-leader LWPs first.
   Returns 0.  */

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  /* Now the leader.  */
  main_lwp = find_lwp_pid (ptid_t (process->pid));
  gdb_assert (main_lwp != nullptr);
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1633
1634/* Remove all LWPs that belong to process PROC from the lwp list. */
1635
8adb37b9
TBA
1636void
1637linux_process_target::mourn (process_info *process)
8336d594 1638{
8336d594
PA
1639#ifdef USE_THREAD_DB
1640 thread_db_mourn (process);
1641#endif
1642
fd000fb3 1643 for_each_thread (process->pid, [this] (thread_info *thread)
6b2a85da
SM
1644 {
1645 delete_lwp (get_thread_lwp (thread));
1646 });
f9e39928 1647
f551c8ef 1648 this->remove_linux_process (process);
8336d594
PA
1649}
1650
95a49a39
TBA
1651void
1652linux_process_target::join (int pid)
444d6139 1653{
444d6139
PA
1654 int status, ret;
1655
1656 do {
d105de22 1657 ret = my_waitpid (pid, &status, 0);
444d6139
PA
1658 if (WIFEXITED (status) || WIFSIGNALED (status))
1659 break;
1660 } while (ret != -1 || errno != ECHILD);
1661}
1662
13d3d99b
TBA
1663/* Return true if the given thread is still alive. */
1664
1665bool
1666linux_process_target::thread_alive (ptid_t ptid)
0d62e5e8 1667{
95954743
PA
1668 struct lwp_info *lwp = find_lwp_pid (ptid);
1669
1670 /* We assume we always know if a thread exits. If a whole process
1671 exited but we still haven't been able to report it to GDB, we'll
1672 hold on to the last lwp of the dead process. */
1673 if (lwp != NULL)
00db26fa 1674 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1675 else
1676 return 0;
1677}
1678
/* Return true if THREAD's pending status is still valid to report to
   GDB.  A pending breakpoint stop is discarded (and the LWP resumed
   by the caller) if the PC has moved, or (without SIGTRAP siginfo)
   if the breakpoint that caused it is gone.  */

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      /* Reading memory/registers below needs THREAD selected.  */
      scoped_restore_current_thread restore_thread;
      switch_to_thread (thread);

      if (pc != lp->stop_pc)
	{
	  threads_debug_printf ("PC of %ld changed",
				lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      /* Without siginfo we can't trust the recorded stop reason;
	 re-check that the breakpoint still exists at PC.  */
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !low_breakpoint_at (pc))
	{
	  threads_debug_printf ("previous SW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  threads_debug_printf ("previous HW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
#endif

      if (discard)
	{
	  threads_debug_printf ("discarding pending breakpoint status");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1735
a681f9c9
PA
1736/* Returns true if LWP is resumed from the client's perspective. */
1737
1738static int
1739lwp_resumed (struct lwp_info *lwp)
1740{
1741 struct thread_info *thread = get_lwp_thread (lwp);
1742
1743 if (thread->last_resume_kind != resume_stop)
1744 return 1;
1745
1746 /* Did gdb send us a `vCont;t', but we haven't reported the
1747 corresponding stop to gdb yet? If so, the thread is still
1748 resumed/running from gdb's perspective. */
1749 if (thread->last_resume_kind == resume_stop
183be222 1750 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
a681f9c9
PA
1751 return 1;
1752
1753 return 0;
1754}
1755
/* Return true if THREAD has a reportable pending status and matches
   PTID.  As a side effect, a stale breakpoint status is discarded and
   the LWP re-resumed.  */

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
						 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      /* The pending status was stale; put the LWP back in motion,
	 preserving whether it was stepping.  */
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
1779
95954743
PA
1780struct lwp_info *
1781find_lwp_pid (ptid_t ptid)
1782{
d4895ba2
SM
1783 long lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1784 thread_info *thread = find_thread ([lwp] (thread_info *thr_arg)
454296a2 1785 {
da4ae14a 1786 return thr_arg->id.lwp () == lwp;
454296a2 1787 });
d86d4aaf
DE
1788
1789 if (thread == NULL)
1790 return NULL;
1791
9c80ecd6 1792 return get_thread_lwp (thread);
95954743
PA
1793}
1794
fa96cb38 1795/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1796
fa96cb38
PA
1797static int
1798num_lwps (int pid)
1799{
fa96cb38 1800 int count = 0;
0d62e5e8 1801
4d3bb80e
SM
1802 for_each_thread (pid, [&] (thread_info *thread)
1803 {
9c80ecd6 1804 count++;
4d3bb80e 1805 });
3aee8918 1806
fa96cb38
PA
1807 return count;
1808}
d61ddec4 1809
6d4ee8c6
GB
1810/* See nat/linux-nat.h. */
1811
1812struct lwp_info *
1813iterate_over_lwps (ptid_t filter,
d3a70e03 1814 gdb::function_view<iterate_over_lwps_ftype> callback)
6d4ee8c6 1815{
da4ae14a 1816 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
6d1e5673 1817 {
da4ae14a 1818 lwp_info *lwp = get_thread_lwp (thr_arg);
6d1e5673 1819
d3a70e03 1820 return callback (lwp);
6d1e5673 1821 });
6d4ee8c6 1822
9c80ecd6 1823 if (thread == NULL)
6d4ee8c6
GB
1824 return NULL;
1825
9c80ecd6 1826 return get_thread_lwp (thread);
6d4ee8c6
GB
1827}
1828
e8a625d1 1829bool
fd000fb3 1830linux_process_target::check_zombie_leaders ()
fa96cb38 1831{
e8a625d1
PA
1832 bool new_pending_event = false;
1833
1834 for_each_process ([&] (process_info *proc)
aa40a989
PA
1835 {
1836 pid_t leader_pid = pid_of (proc);
1837 lwp_info *leader_lp = find_lwp_pid (ptid_t (leader_pid));
1838
1839 threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1840 "num_lwps=%d, zombie=%d",
1841 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1842 linux_proc_pid_is_zombie (leader_pid));
1843
1844 if (leader_lp != NULL && !leader_lp->stopped
1845 /* Check if there are other threads in the group, as we may
8a841a35
PA
1846 have raced with the inferior simply exiting. Note this
1847 isn't a watertight check. If the inferior is
1848 multi-threaded and is exiting, it may be we see the
1849 leader as zombie before we reap all the non-leader
1850 threads. See comments below. */
aa40a989
PA
1851 && !last_thread_of_process_p (leader_pid)
1852 && linux_proc_pid_is_zombie (leader_pid))
1853 {
8a841a35
PA
1854 /* A zombie leader in a multi-threaded program can mean one
1855 of three things:
1856
1857 #1 - Only the leader exited, not the whole program, e.g.,
1858 with pthread_exit. Since we can't reap the leader's exit
1859 status until all other threads are gone and reaped too,
1860 we want to delete the zombie leader right away, as it
1861 can't be debugged, we can't read its registers, etc.
1862 This is the main reason we check for zombie leaders
1863 disappearing.
1864
1865 #2 - The whole thread-group/process exited (a group exit,
1866 via e.g. exit(3), and there is (or will be shortly) an
1867 exit reported for each thread in the process, and then
1868 finally an exit for the leader once the non-leaders are
1869 reaped.
1870
1871 #3 - There are 3 or more threads in the group, and a
1872 thread other than the leader exec'd. See comments on
1873 exec events at the top of the file.
1874
1875 Ideally we would never delete the leader for case #2.
1876 Instead, we want to collect the exit status of each
1877 non-leader thread, and then finally collect the exit
1878 status of the leader as normal and use its exit code as
1879 whole-process exit code. Unfortunately, there's no
1880 race-free way to distinguish cases #1 and #2. We can't
1881 assume the exit events for the non-leaders threads are
1882 already pending in the kernel, nor can we assume the
1883 non-leader threads are in zombie state already. Between
1884 the leader becoming zombie and the non-leaders exiting
1885 and becoming zombie themselves, there's a small time
1886 window, so such a check would be racy. Temporarily
1887 pausing all threads and checking to see if all threads
1888 exit or not before re-resuming them would work in the
1889 case that all threads are running right now, but it
1890 wouldn't work if some thread is currently already
1891 ptrace-stopped, e.g., due to scheduler-locking.
1892
1893 So what we do is we delete the leader anyhow, and then
1894 later on when we see its exit status, we re-add it back.
1895 We also make sure that we only report a whole-process
1896 exit when we see the leader exiting, as opposed to when
1897 the last LWP in the LWP list exits, which can be a
1898 non-leader if we deleted the leader here. */
aa40a989 1899 threads_debug_printf ("Thread group leader %d zombie "
8a841a35
PA
1900 "(it exited, or another thread execd), "
1901 "deleting it.",
aa40a989 1902 leader_pid);
e8a625d1
PA
1903
1904 thread_info *leader_thread = get_lwp_thread (leader_lp);
1905 if (report_exit_events_for (leader_thread))
1906 {
1907 mark_lwp_dead (leader_lp, W_EXITCODE (0, 0), true);
1908 new_pending_event = true;
1909 }
1910 else
1911 delete_lwp (leader_lp);
aa40a989 1912 }
9179355e 1913 });
e8a625d1
PA
1914
1915 return new_pending_event;
fa96cb38 1916}
c3adc08c 1917
a1385b7b
SM
1918/* Callback for `find_thread'. Returns the first LWP that is not
1919 stopped. */
d50171e4 1920
a1385b7b
SM
1921static bool
1922not_stopped_callback (thread_info *thread, ptid_t filter)
fa96cb38 1923{
a1385b7b
SM
1924 if (!thread->id.matches (filter))
1925 return false;
47c0c975 1926
a1385b7b 1927 lwp_info *lwp = get_thread_lwp (thread);
fa96cb38 1928
a1385b7b 1929 return !lwp->stopped;
0d62e5e8 1930}
611cb4a5 1931
863d01bd
PA
1932/* Increment LWP's suspend count. */
1933
1934static void
1935lwp_suspended_inc (struct lwp_info *lwp)
1936{
1937 lwp->suspended++;
1938
c058728c
SM
1939 if (lwp->suspended > 4)
1940 threads_debug_printf
1941 ("LWP %ld has a suspiciously high suspend count, suspended=%d",
1942 lwpid_of (get_lwp_thread (lwp)), lwp->suspended);
863d01bd
PA
1943}
1944
1945/* Decrement LWP's suspend count. */
1946
1947static void
1948lwp_suspended_decr (struct lwp_info *lwp)
1949{
1950 lwp->suspended--;
1951
1952 if (lwp->suspended < 0)
1953 {
1954 struct thread_info *thread = get_lwp_thread (lwp);
1955
f34652de 1956 internal_error ("unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
863d01bd
PA
1957 lwp->suspended);
1958 }
1959}
1960
219f2f23
PA
1961/* This function should only be called if the LWP got a SIGTRAP.
1962
1963 Handle any tracepoint steps or hits. Return true if a tracepoint
1964 event was handled, 0 otherwise. */
1965
1966static int
1967handle_tracepoints (struct lwp_info *lwp)
1968{
1969 struct thread_info *tinfo = get_lwp_thread (lwp);
1970 int tpoint_related_event = 0;
1971
582511be
PA
1972 gdb_assert (lwp->suspended == 0);
1973
7984d532
PA
1974 /* If this tracepoint hit causes a tracing stop, we'll immediately
1975 uninsert tracepoints. To do this, we temporarily pause all
1976 threads, unpatch away, and then unpause threads. We need to make
1977 sure the unpausing doesn't resume LWP too. */
863d01bd 1978 lwp_suspended_inc (lwp);
7984d532 1979
219f2f23
PA
1980 /* And we need to be sure that any all-threads-stopping doesn't try
1981 to move threads out of the jump pads, as it could deadlock the
1982 inferior (LWP could be in the jump pad, maybe even holding the
1983 lock.) */
1984
1985 /* Do any necessary step collect actions. */
1986 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1987
fa593d66
PA
1988 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1989
219f2f23
PA
1990 /* See if we just hit a tracepoint and do its main collect
1991 actions. */
1992 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1993
863d01bd 1994 lwp_suspended_decr (lwp);
7984d532
PA
1995
1996 gdb_assert (lwp->suspended == 0);
229d26fc
SM
1997 gdb_assert (!stabilizing_threads
1998 || (lwp->collecting_fast_tracepoint
1999 != fast_tpoint_collect_result::not_collecting));
7984d532 2000
219f2f23
PA
2001 if (tpoint_related_event)
2002 {
c058728c 2003 threads_debug_printf ("got a tracepoint event");
219f2f23
PA
2004 return 1;
2005 }
2006
2007 return 0;
2008}
2009
13e567af
TBA
2010fast_tpoint_collect_result
2011linux_process_target::linux_fast_tracepoint_collecting
2012 (lwp_info *lwp, fast_tpoint_collect_status *status)
fa593d66
PA
2013{
2014 CORE_ADDR thread_area;
d86d4aaf 2015 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66 2016
fa593d66
PA
2017 /* Get the thread area address. This is used to recognize which
2018 thread is which when tracing with the in-process agent library.
2019 We don't read anything from the address, and treat it as opaque;
2020 it's the address itself that we assume is unique per-thread. */
13e567af 2021 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
229d26fc 2022 return fast_tpoint_collect_result::not_collecting;
fa593d66
PA
2023
2024 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2025}
2026
13e567af
TBA
2027int
2028linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
2029{
2030 return -1;
2031}
2032
d16f3f6c
TBA
2033bool
2034linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
fa593d66 2035{
24583e45
TBA
2036 scoped_restore_current_thread restore_thread;
2037 switch_to_thread (get_lwp_thread (lwp));
fa593d66
PA
2038
2039 if ((wstat == NULL
2040 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2041 && supports_fast_tracepoints ()
58b4daa5 2042 && agent_loaded_p ())
fa593d66
PA
2043 {
2044 struct fast_tpoint_collect_status status;
fa593d66 2045
c058728c
SM
2046 threads_debug_printf
2047 ("Checking whether LWP %ld needs to move out of the jump pad.",
2048 lwpid_of (current_thread));
fa593d66 2049
229d26fc
SM
2050 fast_tpoint_collect_result r
2051 = linux_fast_tracepoint_collecting (lwp, &status);
fa593d66
PA
2052
2053 if (wstat == NULL
2054 || (WSTOPSIG (*wstat) != SIGILL
2055 && WSTOPSIG (*wstat) != SIGFPE
2056 && WSTOPSIG (*wstat) != SIGSEGV
2057 && WSTOPSIG (*wstat) != SIGBUS))
2058 {
2059 lwp->collecting_fast_tracepoint = r;
2060
229d26fc 2061 if (r != fast_tpoint_collect_result::not_collecting)
fa593d66 2062 {
229d26fc
SM
2063 if (r == fast_tpoint_collect_result::before_insn
2064 && lwp->exit_jump_pad_bkpt == NULL)
fa593d66
PA
2065 {
2066 /* Haven't executed the original instruction yet.
2067 Set breakpoint there, and wait till it's hit,
2068 then single-step until exiting the jump pad. */
2069 lwp->exit_jump_pad_bkpt
2070 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2071 }
2072
c058728c
SM
2073 threads_debug_printf
2074 ("Checking whether LWP %ld needs to move out of the jump pad..."
2075 " it does", lwpid_of (current_thread));
fa593d66 2076
d16f3f6c 2077 return true;
fa593d66
PA
2078 }
2079 }
2080 else
2081 {
2082 /* If we get a synchronous signal while collecting, *and*
2083 while executing the (relocated) original instruction,
2084 reset the PC to point at the tpoint address, before
2085 reporting to GDB. Otherwise, it's an IPA lib bug: just
2086 report the signal to GDB, and pray for the best. */
2087
229d26fc
SM
2088 lwp->collecting_fast_tracepoint
2089 = fast_tpoint_collect_result::not_collecting;
fa593d66 2090
229d26fc 2091 if (r != fast_tpoint_collect_result::not_collecting
fa593d66
PA
2092 && (status.adjusted_insn_addr <= lwp->stop_pc
2093 && lwp->stop_pc < status.adjusted_insn_addr_end))
2094 {
2095 siginfo_t info;
2096 struct regcache *regcache;
2097
2098 /* The si_addr on a few signals references the address
2099 of the faulting instruction. Adjust that as
2100 well. */
2101 if ((WSTOPSIG (*wstat) == SIGILL
2102 || WSTOPSIG (*wstat) == SIGFPE
2103 || WSTOPSIG (*wstat) == SIGBUS
2104 || WSTOPSIG (*wstat) == SIGSEGV)
0bfdf32f 2105 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 2106 (PTRACE_TYPE_ARG3) 0, &info) == 0
fa593d66
PA
2107 /* Final check just to make sure we don't clobber
2108 the siginfo of non-kernel-sent signals. */
2109 && (uintptr_t) info.si_addr == lwp->stop_pc)
2110 {
2111 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
0bfdf32f 2112 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
b8e1b30e 2113 (PTRACE_TYPE_ARG3) 0, &info);
fa593d66
PA
2114 }
2115
0bfdf32f 2116 regcache = get_thread_regcache (current_thread, 1);
bf9ae9d8 2117 low_set_pc (regcache, status.tpoint_addr);
fa593d66
PA
2118 lwp->stop_pc = status.tpoint_addr;
2119
2120 /* Cancel any fast tracepoint lock this thread was
2121 holding. */
2122 force_unlock_trace_buffer ();
2123 }
2124
2125 if (lwp->exit_jump_pad_bkpt != NULL)
2126 {
c058728c
SM
2127 threads_debug_printf
2128 ("Cancelling fast exit-jump-pad: removing bkpt."
2129 "stopping all threads momentarily.");
fa593d66
PA
2130
2131 stop_all_lwps (1, lwp);
fa593d66
PA
2132
2133 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2134 lwp->exit_jump_pad_bkpt = NULL;
2135
2136 unstop_all_lwps (1, lwp);
2137
2138 gdb_assert (lwp->suspended >= 0);
2139 }
2140 }
2141 }
2142
c058728c
SM
2143 threads_debug_printf
2144 ("Checking whether LWP %ld needs to move out of the jump pad... no",
2145 lwpid_of (current_thread));
0cccb683 2146
d16f3f6c 2147 return false;
fa593d66
PA
2148}
2149
2150/* Enqueue one signal in the "signals to report later when out of the
2151 jump pad" list. */
2152
2153static void
2154enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2155{
d86d4aaf 2156 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66 2157
c058728c
SM
2158 threads_debug_printf ("Deferring signal %d for LWP %ld.",
2159 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2160
2161 if (debug_threads)
2162 {
013e3554 2163 for (const auto &sig : lwp->pending_signals_to_report)
c058728c 2164 threads_debug_printf (" Already queued %d", sig.signal);
fa593d66 2165
c058728c 2166 threads_debug_printf (" (no more currently queued signals)");
fa593d66
PA
2167 }
2168
1a981360
PA
2169 /* Don't enqueue non-RT signals if they are already in the deferred
2170 queue. (SIGSTOP being the easiest signal to see ending up here
2171 twice) */
2172 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2173 {
013e3554 2174 for (const auto &sig : lwp->pending_signals_to_report)
1a981360 2175 {
013e3554 2176 if (sig.signal == WSTOPSIG (*wstat))
1a981360 2177 {
c058728c
SM
2178 threads_debug_printf
2179 ("Not requeuing already queued non-RT signal %d for LWP %ld",
2180 sig.signal, lwpid_of (thread));
1a981360
PA
2181 return;
2182 }
2183 }
2184 }
2185
013e3554 2186 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
8d749320 2187
d86d4aaf 2188 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
013e3554 2189 &lwp->pending_signals_to_report.back ().info);
fa593d66
PA
2190}
2191
2192/* Dequeue one signal from the "signals to report later when out of
2193 the jump pad" list. */
2194
2195static int
2196dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2197{
d86d4aaf
DE
2198 struct thread_info *thread = get_lwp_thread (lwp);
2199
013e3554 2200 if (!lwp->pending_signals_to_report.empty ())
fa593d66 2201 {
013e3554 2202 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
fa593d66 2203
013e3554
TBA
2204 *wstat = W_STOPCODE (p_sig.signal);
2205 if (p_sig.info.si_signo != 0)
d86d4aaf 2206 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
013e3554
TBA
2207 &p_sig.info);
2208
2209 lwp->pending_signals_to_report.pop_front ();
fa593d66 2210
c058728c
SM
2211 threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
2212 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2213
2214 if (debug_threads)
2215 {
013e3554 2216 for (const auto &sig : lwp->pending_signals_to_report)
c058728c 2217 threads_debug_printf (" Still queued %d", sig.signal);
fa593d66 2218
c058728c 2219 threads_debug_printf (" (no more queued signals)");
fa593d66
PA
2220 }
2221
2222 return 1;
2223 }
2224
2225 return 0;
2226}
2227
ac1bbaca
TBA
2228bool
2229linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
582511be 2230{
24583e45
TBA
2231 scoped_restore_current_thread restore_thread;
2232 switch_to_thread (get_lwp_thread (child));
d50171e4 2233
ac1bbaca
TBA
2234 if (low_stopped_by_watchpoint ())
2235 {
2236 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2237 child->stopped_data_address = low_stopped_data_address ();
2238 }
582511be 2239
ac1bbaca
TBA
2240 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2241}
d50171e4 2242
ac1bbaca
TBA
2243bool
2244linux_process_target::low_stopped_by_watchpoint ()
2245{
2246 return false;
2247}
d50171e4 2248
ac1bbaca
TBA
2249CORE_ADDR
2250linux_process_target::low_stopped_data_address ()
2251{
2252 return 0;
c4d9ceb6
YQ
2253}
2254
de0d863e
DB
2255/* Return the ptrace options that we want to try to enable. */
2256
2257static int
2258linux_low_ptrace_options (int attached)
2259{
c12a5089 2260 client_state &cs = get_client_state ();
de0d863e
DB
2261 int options = 0;
2262
2263 if (!attached)
2264 options |= PTRACE_O_EXITKILL;
2265
c12a5089 2266 if (cs.report_fork_events)
de0d863e
DB
2267 options |= PTRACE_O_TRACEFORK;
2268
c12a5089 2269 if (cs.report_vfork_events)
c269dbdb
DB
2270 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2271
c12a5089 2272 if (cs.report_exec_events)
94585166
DB
2273 options |= PTRACE_O_TRACEEXEC;
2274
82075af2
JS
2275 options |= PTRACE_O_TRACESYSGOOD;
2276
de0d863e
DB
2277 return options;
2278}
2279
1a48f002 2280void
d16f3f6c 2281linux_process_target::filter_event (int lwpid, int wstat)
fa96cb38
PA
2282{
2283 struct lwp_info *child;
2284 struct thread_info *thread;
582511be 2285 int have_stop_pc = 0;
fa96cb38 2286
f2907e49 2287 child = find_lwp_pid (ptid_t (lwpid));
fa96cb38 2288
5406bc3f
PA
2289 /* Check for events reported by anything not in our LWP list. */
2290 if (child == nullptr)
94585166 2291 {
5406bc3f
PA
2292 if (WIFSTOPPED (wstat))
2293 {
2294 if (WSTOPSIG (wstat) == SIGTRAP
2295 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2296 {
2297 /* A non-leader thread exec'ed after we've seen the
2298 leader zombie, and removed it from our lists (in
2299 check_zombie_leaders). The non-leader thread changes
2300 its tid to the tgid. */
2301 threads_debug_printf
2302 ("Re-adding thread group leader LWP %d after exec.",
2303 lwpid);
94585166 2304
5406bc3f
PA
2305 child = add_lwp (ptid_t (lwpid, lwpid));
2306 child->stopped = 1;
2307 switch_to_thread (child->thread);
2308 }
2309 else
2310 {
2311 /* A process we are controlling has forked and the new
2312 child's stop was reported to us by the kernel. Save
2313 its PID and go back to waiting for the fork event to
2314 be reported - the stopped process might be returned
2315 from waitpid before or after the fork event is. */
2316 threads_debug_printf
2317 ("Saving LWP %d status %s in stopped_pids list",
2318 lwpid, status_to_str (wstat).c_str ());
2319 add_to_pid_list (&stopped_pids, lwpid, wstat);
2320 }
2321 }
2322 else
2323 {
2324 /* Don't report an event for the exit of an LWP not in our
2325 list, i.e. not part of any inferior we're debugging.
2326 This can happen if we detach from a program we originally
8a841a35
PA
2327 forked and then it exits. However, note that we may have
2328 earlier deleted a leader of an inferior we're debugging,
2329 in check_zombie_leaders. Re-add it back here if so. */
2330 find_process ([&] (process_info *proc)
2331 {
2332 if (proc->pid == lwpid)
2333 {
2334 threads_debug_printf
2335 ("Re-adding thread group leader LWP %d after exit.",
2336 lwpid);
2337
2338 child = add_lwp (ptid_t (lwpid, lwpid));
2339 return true;
2340 }
2341 return false;
2342 });
5406bc3f 2343 }
94585166 2344
5406bc3f
PA
2345 if (child == nullptr)
2346 return;
fa96cb38 2347 }
fa96cb38
PA
2348
2349 thread = get_lwp_thread (child);
2350
2351 child->stopped = 1;
2352
2353 child->last_status = wstat;
2354
582511be
PA
2355 /* Check if the thread has exited. */
2356 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2357 {
c058728c 2358 threads_debug_printf ("%d exited", lwpid);
f50bf8e5
YQ
2359
2360 if (finish_step_over (child))
2361 {
2362 /* Unsuspend all other LWPs, and set them back running again. */
2363 unsuspend_all_lwps (child);
2364 }
2365
8a841a35
PA
2366 /* If this is not the leader LWP, then the exit signal was not
2367 the end of the debugged application and should be ignored,
2368 unless GDB wants to hear about thread exits. */
48989498 2369 if (report_exit_events_for (thread) || is_leader (thread))
582511be 2370 {
65706a29
PA
2371 /* Since events are serialized to GDB core, and we can't
2372 report this one right now. Leave the status pending for
2373 the next time we're able to report it. */
e8a625d1 2374 mark_lwp_dead (child, wstat, false);
1a48f002 2375 return;
582511be
PA
2376 }
2377 else
2378 {
65706a29 2379 delete_lwp (child);
1a48f002 2380 return;
582511be
PA
2381 }
2382 }
2383
2384 gdb_assert (WIFSTOPPED (wstat));
2385
fa96cb38
PA
2386 if (WIFSTOPPED (wstat))
2387 {
2388 struct process_info *proc;
2389
c06cbd92 2390 /* Architecture-specific setup after inferior is running. */
fa96cb38 2391 proc = find_process_pid (pid_of (thread));
c06cbd92 2392 if (proc->tdesc == NULL)
fa96cb38 2393 {
c06cbd92
YQ
2394 if (proc->attached)
2395 {
c06cbd92
YQ
2396 /* This needs to happen after we have attached to the
2397 inferior and it is stopped for the first time, but
2398 before we access any inferior registers. */
797bcff5 2399 arch_setup_thread (thread);
c06cbd92
YQ
2400 }
2401 else
2402 {
2403 /* The process is started, but GDBserver will do
2404 architecture-specific setup after the program stops at
2405 the first instruction. */
2406 child->status_pending_p = 1;
2407 child->status_pending = wstat;
1a48f002 2408 return;
c06cbd92 2409 }
fa96cb38
PA
2410 }
2411 }
2412
fa96cb38
PA
2413 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2414 {
beed38b8 2415 struct process_info *proc = find_process_pid (pid_of (thread));
de0d863e 2416 int options = linux_low_ptrace_options (proc->attached);
beed38b8 2417
de0d863e 2418 linux_enable_event_reporting (lwpid, options);
fa96cb38
PA
2419 child->must_set_ptrace_flags = 0;
2420 }
2421
82075af2
JS
2422 /* Always update syscall_state, even if it will be filtered later. */
2423 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2424 {
2425 child->syscall_state
2426 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2427 ? TARGET_WAITKIND_SYSCALL_RETURN
2428 : TARGET_WAITKIND_SYSCALL_ENTRY);
2429 }
2430 else
2431 {
2432 /* Almost all other ptrace-stops are known to be outside of system
2433 calls, with further exceptions in handle_extended_wait. */
2434 child->syscall_state = TARGET_WAITKIND_IGNORE;
2435 }
2436
e7ad2f14
PA
2437 /* Be careful to not overwrite stop_pc until save_stop_reason is
2438 called. */
fa96cb38 2439 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
89a5711c 2440 && linux_is_extended_waitstatus (wstat))
fa96cb38 2441 {
582511be 2442 child->stop_pc = get_pc (child);
94585166 2443 if (handle_extended_wait (&child, wstat))
de0d863e
DB
2444 {
2445 /* The event has been handled, so just return without
2446 reporting it. */
1a48f002 2447 return;
de0d863e 2448 }
fa96cb38
PA
2449 }
2450
80aea927 2451 if (linux_wstatus_maybe_breakpoint (wstat))
582511be 2452 {
e7ad2f14 2453 if (save_stop_reason (child))
582511be
PA
2454 have_stop_pc = 1;
2455 }
2456
2457 if (!have_stop_pc)
2458 child->stop_pc = get_pc (child);
2459
fa96cb38
PA
2460 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2461 && child->stop_expected)
2462 {
c058728c
SM
2463 threads_debug_printf ("Expected stop.");
2464
fa96cb38
PA
2465 child->stop_expected = 0;
2466
2467 if (thread->last_resume_kind == resume_stop)
2468 {
2469 /* We want to report the stop to the core. Treat the
2470 SIGSTOP as a normal event. */
c058728c
SM
2471 threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
2472 target_pid_to_str (ptid_of (thread)).c_str ());
fa96cb38
PA
2473 }
2474 else if (stopping_threads != NOT_STOPPING_THREADS)
2475 {
2476 /* Stopping threads. We don't want this SIGSTOP to end up
582511be 2477 pending. */
c058728c
SM
2478 threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
2479 target_pid_to_str (ptid_of (thread)).c_str ());
1a48f002 2480 return;
fa96cb38
PA
2481 }
2482 else
2483 {
2bf6fb9d 2484 /* This is a delayed SIGSTOP. Filter out the event. */
c058728c 2485 threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
2bf6fb9d 2486 child->stepping ? "step" : "continue",
61d7f128 2487 target_pid_to_str (ptid_of (thread)).c_str ());
2bf6fb9d 2488
df95181f 2489 resume_one_lwp (child, child->stepping, 0, NULL);
1a48f002 2490 return;
fa96cb38
PA
2491 }
2492 }
2493
582511be
PA
2494 child->status_pending_p = 1;
2495 child->status_pending = wstat;
1a48f002 2496 return;
fa96cb38
PA
2497}
2498
b31cdfa6
TBA
2499bool
2500linux_process_target::maybe_hw_step (thread_info *thread)
f79b145d 2501{
b31cdfa6
TBA
2502 if (supports_hardware_single_step ())
2503 return true;
f79b145d
YQ
2504 else
2505 {
3b9a79ef 2506 /* GDBserver must insert single-step breakpoint for software
f79b145d 2507 single step. */
3b9a79ef 2508 gdb_assert (has_single_step_breakpoints (thread));
b31cdfa6 2509 return false;
f79b145d
YQ
2510 }
2511}
2512
df95181f
TBA
2513void
2514linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
20ba1ce6 2515{
20ba1ce6
PA
2516 struct lwp_info *lp = get_thread_lwp (thread);
2517
2518 if (lp->stopped
863d01bd 2519 && !lp->suspended
20ba1ce6 2520 && !lp->status_pending_p
183be222 2521 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
20ba1ce6 2522 {
8901d193
YQ
2523 int step = 0;
2524
2525 if (thread->last_resume_kind == resume_step)
b6d8d612
KB
2526 {
2527 if (supports_software_single_step ())
2528 install_software_single_step_breakpoints (lp);
2529
2530 step = maybe_hw_step (thread);
2531 }
20ba1ce6 2532
c058728c
SM
2533 threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
2534 target_pid_to_str (ptid_of (thread)).c_str (),
2535 paddress (lp->stop_pc), step);
20ba1ce6 2536
df95181f 2537 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
20ba1ce6
PA
2538 }
2539}
2540
d16f3f6c
TBA
2541int
2542linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2543 ptid_t filter_ptid,
2544 int *wstatp, int options)
0d62e5e8 2545{
d86d4aaf 2546 struct thread_info *event_thread;
d50171e4 2547 struct lwp_info *event_child, *requested_child;
fa96cb38 2548 sigset_t block_mask, prev_mask;
d50171e4 2549
fa96cb38 2550 retry:
d86d4aaf
DE
2551 /* N.B. event_thread points to the thread_info struct that contains
2552 event_child. Keep them in sync. */
2553 event_thread = NULL;
d50171e4
PA
2554 event_child = NULL;
2555 requested_child = NULL;
0d62e5e8 2556
95954743 2557 /* Check for a lwp with a pending status. */
bd99dc85 2558
d7e15655 2559 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
0d62e5e8 2560 {
83e1b6c1
SM
2561 event_thread = find_thread_in_random ([&] (thread_info *thread)
2562 {
2563 return status_pending_p_callback (thread, filter_ptid);
2564 });
2565
d86d4aaf 2566 if (event_thread != NULL)
c058728c
SM
2567 {
2568 event_child = get_thread_lwp (event_thread);
2569 threads_debug_printf ("Got a pending child %ld", lwpid_of (event_thread));
2570 }
0d62e5e8 2571 }
d7e15655 2572 else if (filter_ptid != null_ptid)
0d62e5e8 2573 {
fa96cb38 2574 requested_child = find_lwp_pid (filter_ptid);
59487af3 2575 gdb_assert (requested_child != nullptr);
d50171e4 2576
bde24c0a 2577 if (stopping_threads == NOT_STOPPING_THREADS
fa593d66 2578 && requested_child->status_pending_p
229d26fc
SM
2579 && (requested_child->collecting_fast_tracepoint
2580 != fast_tpoint_collect_result::not_collecting))
fa593d66
PA
2581 {
2582 enqueue_one_deferred_signal (requested_child,
2583 &requested_child->status_pending);
2584 requested_child->status_pending_p = 0;
2585 requested_child->status_pending = 0;
df95181f 2586 resume_one_lwp (requested_child, 0, 0, NULL);
fa593d66
PA
2587 }
2588
2589 if (requested_child->suspended
2590 && requested_child->status_pending_p)
38e08fca 2591 {
f34652de 2592 internal_error ("requesting an event out of a"
38e08fca
GB
2593 " suspended child?");
2594 }
fa593d66 2595
d50171e4 2596 if (requested_child->status_pending_p)
d86d4aaf
DE
2597 {
2598 event_child = requested_child;
2599 event_thread = get_lwp_thread (event_child);
2600 }
0d62e5e8 2601 }
611cb4a5 2602
0d62e5e8
DJ
2603 if (event_child != NULL)
2604 {
c058728c
SM
2605 threads_debug_printf ("Got an event from pending child %ld (%04x)",
2606 lwpid_of (event_thread),
2607 event_child->status_pending);
2608
fa96cb38 2609 *wstatp = event_child->status_pending;
bd99dc85
PA
2610 event_child->status_pending_p = 0;
2611 event_child->status_pending = 0;
24583e45 2612 switch_to_thread (event_thread);
d86d4aaf 2613 return lwpid_of (event_thread);
0d62e5e8
DJ
2614 }
2615
fa96cb38
PA
2616 /* But if we don't find a pending event, we'll have to wait.
2617
2618 We only enter this loop if no process has a pending wait status.
2619 Thus any action taken in response to a wait status inside this
2620 loop is responding as soon as we detect the status, not after any
2621 pending events. */
d8301ad1 2622
fa96cb38
PA
2623 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2624 all signals while here. */
2625 sigfillset (&block_mask);
21987b9c 2626 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
fa96cb38 2627
582511be
PA
2628 /* Always pull all events out of the kernel. We'll randomly select
2629 an event LWP out of all that have events, to prevent
2630 starvation. */
fa96cb38 2631 while (event_child == NULL)
0d62e5e8 2632 {
fa96cb38 2633 pid_t ret = 0;
0d62e5e8 2634
fa96cb38
PA
2635 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2636 quirks:
0d62e5e8 2637
fa96cb38
PA
2638 - If the thread group leader exits while other threads in the
2639 thread group still exist, waitpid(TGID, ...) hangs. That
2640 waitpid won't return an exit status until the other threads
2641 in the group are reaped.
611cb4a5 2642
fa96cb38
PA
2643 - When a non-leader thread execs, that thread just vanishes
2644 without reporting an exit (so we'd hang if we waited for it
2645 explicitly in that case). The exec event is reported to
94585166 2646 the TGID pid. */
fa96cb38
PA
2647 errno = 0;
2648 ret = my_waitpid (-1, wstatp, options | WNOHANG);
d8301ad1 2649
c058728c
SM
2650 threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
2651 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
0d62e5e8 2652
fa96cb38 2653 if (ret > 0)
0d62e5e8 2654 {
c058728c
SM
2655 threads_debug_printf ("waitpid %ld received %s",
2656 (long) ret, status_to_str (*wstatp).c_str ());
89be2091 2657
582511be
PA
2658 /* Filter all events. IOW, leave all events pending. We'll
2659 randomly select an event LWP out of all that have events
2660 below. */
d16f3f6c 2661 filter_event (ret, *wstatp);
fa96cb38
PA
2662 /* Retry until nothing comes out of waitpid. A single
2663 SIGCHLD can indicate more than one child stopped. */
89be2091
DJ
2664 continue;
2665 }
2666
20ba1ce6
PA
2667 /* Now that we've pulled all events out of the kernel, resume
2668 LWPs that don't have an interesting event to report. */
2669 if (stopping_threads == NOT_STOPPING_THREADS)
df95181f
TBA
2670 for_each_thread ([this] (thread_info *thread)
2671 {
2672 resume_stopped_resumed_lwps (thread);
2673 });
20ba1ce6
PA
2674
2675 /* ... and find an LWP with a status to report to the core, if
2676 any. */
83e1b6c1
SM
2677 event_thread = find_thread_in_random ([&] (thread_info *thread)
2678 {
2679 return status_pending_p_callback (thread, filter_ptid);
2680 });
2681
582511be
PA
2682 if (event_thread != NULL)
2683 {
2684 event_child = get_thread_lwp (event_thread);
2685 *wstatp = event_child->status_pending;
2686 event_child->status_pending_p = 0;
2687 event_child->status_pending = 0;
2688 break;
2689 }
2690
fa96cb38
PA
2691 /* Check for zombie thread group leaders. Those can't be reaped
2692 until all other threads in the thread group are. */
e8a625d1
PA
2693 if (check_zombie_leaders ())
2694 goto retry;
fa96cb38 2695
a1385b7b
SM
2696 auto not_stopped = [&] (thread_info *thread)
2697 {
2698 return not_stopped_callback (thread, wait_ptid);
2699 };
2700
fa96cb38
PA
2701 /* If there are no resumed children left in the set of LWPs we
2702 want to wait for, bail. We can't just block in
2703 waitpid/sigsuspend, because lwps might have been left stopped
2704 in trace-stop state, and we'd be stuck forever waiting for
2705 their status to change (which would only happen if we resumed
2706 them). Even if WNOHANG is set, this return code is preferred
2707 over 0 (below), as it is more detailed. */
a1385b7b 2708 if (find_thread (not_stopped) == NULL)
a6dbe5df 2709 {
c058728c
SM
2710 threads_debug_printf ("exit (no unwaited-for LWP)");
2711
21987b9c 2712 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
fa96cb38 2713 return -1;
a6dbe5df
PA
2714 }
2715
fa96cb38
PA
2716 /* No interesting event to report to the caller. */
2717 if ((options & WNOHANG))
24a09b5f 2718 {
c058728c 2719 threads_debug_printf ("WNOHANG set, no event found");
fa96cb38 2720
21987b9c 2721 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
fa96cb38 2722 return 0;
24a09b5f
DJ
2723 }
2724
fa96cb38 2725 /* Block until we get an event reported with SIGCHLD. */
c058728c 2726 threads_debug_printf ("sigsuspend'ing");
d50171e4 2727
fa96cb38 2728 sigsuspend (&prev_mask);
21987b9c 2729 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
fa96cb38
PA
2730 goto retry;
2731 }
d50171e4 2732
21987b9c 2733 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
d50171e4 2734
24583e45 2735 switch_to_thread (event_thread);
d50171e4 2736
fa96cb38
PA
2737 return lwpid_of (event_thread);
2738}
2739
d16f3f6c
TBA
2740int
2741linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
fa96cb38 2742{
d16f3f6c 2743 return wait_for_event_filtered (ptid, ptid, wstatp, options);
611cb4a5
DJ
2744}
2745
6bf5e0ba
PA
2746/* Select one LWP out of those that have events pending. */
2747
2748static void
2749select_event_lwp (struct lwp_info **orig_lp)
2750{
582511be
PA
2751 struct thread_info *event_thread = NULL;
2752
2753 /* In all-stop, give preference to the LWP that is being
2754 single-stepped. There will be at most one, and it's the LWP that
2755 the core is most interested in. If we didn't do this, then we'd
2756 have to handle pending step SIGTRAPs somehow in case the core
2757 later continues the previously-stepped thread, otherwise we'd
2758 report the pending SIGTRAP, and the core, not having stepped the
2759 thread, wouldn't understand what the trap was for, and therefore
2760 would report it to the user as a random signal. */
2761 if (!non_stop)
6bf5e0ba 2762 {
39a64da5
SM
2763 event_thread = find_thread ([] (thread_info *thread)
2764 {
2765 lwp_info *lp = get_thread_lwp (thread);
2766
183be222 2767 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
39a64da5
SM
2768 && thread->last_resume_kind == resume_step
2769 && lp->status_pending_p);
2770 });
2771
582511be 2772 if (event_thread != NULL)
c058728c
SM
2773 threads_debug_printf
2774 ("Select single-step %s",
2775 target_pid_to_str (ptid_of (event_thread)).c_str ());
6bf5e0ba 2776 }
582511be 2777 if (event_thread == NULL)
6bf5e0ba
PA
2778 {
2779 /* No single-stepping LWP. Select one at random, out of those
dda83cd7 2780 which have had events. */
6bf5e0ba 2781
b0319eaa 2782 event_thread = find_thread_in_random ([&] (thread_info *thread)
39a64da5
SM
2783 {
2784 lwp_info *lp = get_thread_lwp (thread);
2785
b0319eaa 2786 /* Only resumed LWPs that have an event pending. */
183be222 2787 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
b0319eaa 2788 && lp->status_pending_p);
39a64da5 2789 });
6bf5e0ba
PA
2790 }
2791
d86d4aaf 2792 if (event_thread != NULL)
6bf5e0ba 2793 {
d86d4aaf
DE
2794 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2795
6bf5e0ba
PA
2796 /* Switch the event LWP. */
2797 *orig_lp = event_lp;
2798 }
2799}
2800
7984d532
PA
2801/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2802 NULL. */
2803
2804static void
2805unsuspend_all_lwps (struct lwp_info *except)
2806{
139720c5
SM
2807 for_each_thread ([&] (thread_info *thread)
2808 {
2809 lwp_info *lwp = get_thread_lwp (thread);
2810
2811 if (lwp != except)
2812 lwp_suspended_decr (lwp);
2813 });
7984d532
PA
2814}
2815
5a6b0a41 2816static bool lwp_running (thread_info *thread);
fa593d66
PA
2817
2818/* Stabilize threads (move out of jump pads).
2819
2820 If a thread is midway collecting a fast tracepoint, we need to
2821 finish the collection and move it out of the jump pad before
2822 reporting the signal.
2823
2824 This avoids recursion while collecting (when a signal arrives
2825 midway, and the signal handler itself collects), which would trash
2826 the trace buffer. In case the user set a breakpoint in a signal
2827 handler, this avoids the backtrace showing the jump pad, etc..
2828 Most importantly, there are certain things we can't do safely if
2829 threads are stopped in a jump pad (or in its callee's). For
2830 example:
2831
2832 - starting a new trace run. A thread still collecting the
2833 previous run, could trash the trace buffer when resumed. The trace
2834 buffer control structures would have been reset but the thread had
2835 no way to tell. The thread could even midway memcpy'ing to the
2836 buffer, which would mean that when resumed, it would clobber the
2837 trace buffer that had been set for a new run.
2838
2839 - we can't rewrite/reuse the jump pads for new tracepoints
2840 safely. Say you do tstart while a thread is stopped midway while
2841 collecting. When the thread is later resumed, it finishes the
2842 collection, and returns to the jump pad, to execute the original
2843 instruction that was under the tracepoint jump at the time the
2844 older run had been started. If the jump pad had been rewritten
2845 since for something else in the new run, the thread would now
2846 execute the wrong / random instructions. */
2847
5c9eb2f2
TBA
2848void
2849linux_process_target::stabilize_threads ()
fa593d66 2850{
13e567af
TBA
2851 thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
2852 {
2853 return stuck_in_jump_pad (thread);
2854 });
fa593d66 2855
d86d4aaf 2856 if (thread_stuck != NULL)
fa593d66 2857 {
c058728c
SM
2858 threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
2859 lwpid_of (thread_stuck));
fa593d66
PA
2860 return;
2861 }
2862
24583e45 2863 scoped_restore_current_thread restore_thread;
fa593d66
PA
2864
2865 stabilizing_threads = 1;
2866
2867 /* Kick 'em all. */
d16f3f6c
TBA
2868 for_each_thread ([this] (thread_info *thread)
2869 {
2870 move_out_of_jump_pad (thread);
2871 });
fa593d66
PA
2872
2873 /* Loop until all are stopped out of the jump pads. */
5a6b0a41 2874 while (find_thread (lwp_running) != NULL)
fa593d66
PA
2875 {
2876 struct target_waitstatus ourstatus;
2877 struct lwp_info *lwp;
fa593d66
PA
2878 int wstat;
2879
2880 /* Note that we go through the full wait even loop. While
2881 moving threads out of jump pad, we need to be able to step
2882 over internal breakpoints and such. */
d16f3f6c 2883 wait_1 (minus_one_ptid, &ourstatus, 0);
fa593d66 2884
183be222 2885 if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
fa593d66 2886 {
0bfdf32f 2887 lwp = get_thread_lwp (current_thread);
fa593d66
PA
2888
2889 /* Lock it. */
863d01bd 2890 lwp_suspended_inc (lwp);
fa593d66 2891
183be222 2892 if (ourstatus.sig () != GDB_SIGNAL_0
0bfdf32f 2893 || current_thread->last_resume_kind == resume_stop)
fa593d66 2894 {
183be222 2895 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
fa593d66
PA
2896 enqueue_one_deferred_signal (lwp, &wstat);
2897 }
2898 }
2899 }
2900
fcdad592 2901 unsuspend_all_lwps (NULL);
fa593d66
PA
2902
2903 stabilizing_threads = 0;
2904
b4d51a55 2905 if (debug_threads)
fa593d66 2906 {
13e567af
TBA
2907 thread_stuck = find_thread ([this] (thread_info *thread)
2908 {
2909 return stuck_in_jump_pad (thread);
2910 });
fcb056a5 2911
d86d4aaf 2912 if (thread_stuck != NULL)
c058728c
SM
2913 threads_debug_printf
2914 ("couldn't stabilize, LWP %ld got stuck in jump pad",
2915 lwpid_of (thread_stuck));
fa593d66
PA
2916 }
2917}
2918
582511be
PA
2919/* Convenience function that is called when the kernel reports an
2920 event that is not passed out to GDB. */
2921
2922static ptid_t
2923ignore_event (struct target_waitstatus *ourstatus)
2924{
2925 /* If we got an event, there may still be others, as a single
2926 SIGCHLD can indicate more than one child stopped. This forces
2927 another target_wait call. */
2928 async_file_mark ();
2929
183be222 2930 ourstatus->set_ignore ();
582511be
PA
2931 return null_ptid;
2932}
2933
fd000fb3
TBA
2934ptid_t
2935linux_process_target::filter_exit_event (lwp_info *event_child,
2936 target_waitstatus *ourstatus)
65706a29
PA
2937{
2938 struct thread_info *thread = get_lwp_thread (event_child);
2939 ptid_t ptid = ptid_of (thread);
2940
e8a625d1
PA
2941 if (ourstatus->kind () == TARGET_WAITKIND_THREAD_EXITED)
2942 {
2943 /* We're reporting a thread exit for the leader. The exit was
2944 detected by check_zombie_leaders. */
2945 gdb_assert (is_leader (thread));
2946 gdb_assert (report_exit_events_for (thread));
2947
2948 delete_lwp (event_child);
2949 return ptid;
2950 }
2951
48989498
PA
2952 /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
2953 if a non-leader thread exits with a signal, we'd report it to the
2954 core which would interpret it as the whole-process exiting.
2955 There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind. */
2956 if (ourstatus->kind () != TARGET_WAITKIND_EXITED
2957 && ourstatus->kind () != TARGET_WAITKIND_SIGNALLED)
2958 return ptid;
2959
8a841a35 2960 if (!is_leader (thread))
65706a29 2961 {
48989498 2962 if (report_exit_events_for (thread))
183be222 2963 ourstatus->set_thread_exited (0);
65706a29 2964 else
183be222 2965 ourstatus->set_ignore ();
65706a29
PA
2966
2967 delete_lwp (event_child);
2968 }
2969 return ptid;
2970}
2971
82075af2
JS
2972/* Returns 1 if GDB is interested in any event_child syscalls. */
2973
2974static int
2975gdb_catching_syscalls_p (struct lwp_info *event_child)
2976{
2977 struct thread_info *thread = get_lwp_thread (event_child);
2978 struct process_info *proc = get_thread_process (thread);
2979
f27866ba 2980 return !proc->syscalls_to_catch.empty ();
82075af2
JS
2981}
2982
9eedd27d
TBA
2983bool
2984linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
82075af2 2985{
4cc32bec 2986 int sysno;
82075af2
JS
2987 struct thread_info *thread = get_lwp_thread (event_child);
2988 struct process_info *proc = get_thread_process (thread);
2989
f27866ba 2990 if (proc->syscalls_to_catch.empty ())
9eedd27d 2991 return false;
82075af2 2992
f27866ba 2993 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
9eedd27d 2994 return true;
82075af2 2995
4cc32bec 2996 get_syscall_trapinfo (event_child, &sysno);
f27866ba
SM
2997
2998 for (int iter : proc->syscalls_to_catch)
82075af2 2999 if (iter == sysno)
9eedd27d 3000 return true;
82075af2 3001
9eedd27d 3002 return false;
82075af2
JS
3003}
3004
d16f3f6c
TBA
3005ptid_t
3006linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
b60cea74 3007 target_wait_flags target_options)
da6d8c04 3008{
c058728c
SM
3009 THREADS_SCOPED_DEBUG_ENTER_EXIT;
3010
c12a5089 3011 client_state &cs = get_client_state ();
e5f1222d 3012 int w;
fc7238bb 3013 struct lwp_info *event_child;
bd99dc85 3014 int options;
bd99dc85 3015 int pid;
6bf5e0ba
PA
3016 int step_over_finished;
3017 int bp_explains_trap;
3018 int maybe_internal_trap;
3019 int report_to_gdb;
219f2f23 3020 int trace_event;
c2d6af84 3021 int in_step_range;
bd99dc85 3022
c058728c 3023 threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());
87ce2a04 3024
bd99dc85
PA
3025 /* Translate generic target options into linux options. */
3026 options = __WALL;
3027 if (target_options & TARGET_WNOHANG)
3028 options |= WNOHANG;
0d62e5e8 3029
fa593d66
PA
3030 bp_explains_trap = 0;
3031 trace_event = 0;
c2d6af84 3032 in_step_range = 0;
183be222 3033 ourstatus->set_ignore ();
bd99dc85 3034
ef980d65 3035 bool was_any_resumed = any_resumed ();
f2faf941 3036
d7e15655 3037 if (step_over_bkpt == null_ptid)
d16f3f6c 3038 pid = wait_for_event (ptid, &w, options);
6bf5e0ba
PA
3039 else
3040 {
c058728c
SM
3041 threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
3042 target_pid_to_str (step_over_bkpt).c_str ());
d16f3f6c 3043 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
6bf5e0ba
PA
3044 }
3045
ef980d65 3046 if (pid == 0 || (pid == -1 && !was_any_resumed))
87ce2a04 3047 {
fa96cb38
PA
3048 gdb_assert (target_options & TARGET_WNOHANG);
3049
c058728c 3050 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");
fa96cb38 3051
183be222 3052 ourstatus->set_ignore ();
87ce2a04
DE
3053 return null_ptid;
3054 }
fa96cb38
PA
3055 else if (pid == -1)
3056 {
c058728c 3057 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");
bd99dc85 3058
183be222 3059 ourstatus->set_no_resumed ();
fa96cb38
PA
3060 return null_ptid;
3061 }
0d62e5e8 3062
0bfdf32f 3063 event_child = get_thread_lwp (current_thread);
0d62e5e8 3064
d16f3f6c 3065 /* wait_for_event only returns an exit status for the last
fa96cb38
PA
3066 child of a process. Report it. */
3067 if (WIFEXITED (w) || WIFSIGNALED (w))
da6d8c04 3068 {
fa96cb38 3069 if (WIFEXITED (w))
0d62e5e8 3070 {
e8a625d1
PA
3071 /* If we already have the exit recorded in waitstatus, use
3072 it. This will happen when we detect a zombie leader,
3073 when we had GDB_THREAD_OPTION_EXIT enabled for it. We
3074 want to report its exit as TARGET_WAITKIND_THREAD_EXITED,
3075 as the whole process hasn't exited yet. */
3076 const target_waitstatus &ws = event_child->waitstatus;
3077 if (ws.kind () != TARGET_WAITKIND_IGNORE)
3078 {
3079 gdb_assert (ws.kind () == TARGET_WAITKIND_EXITED
3080 || ws.kind () == TARGET_WAITKIND_THREAD_EXITED);
3081 *ourstatus = ws;
3082 }
3083 else
3084 ourstatus->set_exited (WEXITSTATUS (w));
bd99dc85 3085
c058728c
SM
3086 threads_debug_printf
3087 ("ret = %s, exited with retcode %d",
3088 target_pid_to_str (ptid_of (current_thread)).c_str (),
3089 WEXITSTATUS (w));
fa96cb38
PA
3090 }
3091 else
3092 {
183be222 3093 ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));
5b1c542e 3094
c058728c
SM
3095 threads_debug_printf
3096 ("ret = %s, terminated with signal %d",
3097 target_pid_to_str (ptid_of (current_thread)).c_str (),
3098 WTERMSIG (w));
0d62e5e8 3099 }
fa96cb38 3100
48989498 3101 return filter_exit_event (event_child, ourstatus);
da6d8c04
DJ
3102 }
3103
2d97cd35
AT
3104 /* If step-over executes a breakpoint instruction, in the case of a
3105 hardware single step it means a gdb/gdbserver breakpoint had been
3106 planted on top of a permanent breakpoint, in the case of a software
3107 single step it may just mean that gdbserver hit the reinsert breakpoint.
e7ad2f14 3108 The PC has been adjusted by save_stop_reason to point at
2d97cd35
AT
3109 the breakpoint address.
3110 So in the case of the hardware single step advance the PC manually
3111 past the breakpoint and in the case of software single step advance only
3b9a79ef 3112 if it's not the single_step_breakpoint we are hitting.
2d97cd35
AT
3113 This avoids that a program would keep trapping a permanent breakpoint
3114 forever. */
d7e15655 3115 if (step_over_bkpt != null_ptid
2d97cd35
AT
3116 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3117 && (event_child->stepping
3b9a79ef 3118 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
8090aef2 3119 {
dd373349
AT
3120 int increment_pc = 0;
3121 int breakpoint_kind = 0;
3122 CORE_ADDR stop_pc = event_child->stop_pc;
3123
d16f3f6c
TBA
3124 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3125 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
8090aef2 3126
c058728c
SM
3127 threads_debug_printf
3128 ("step-over for %s executed software breakpoint",
3129 target_pid_to_str (ptid_of (current_thread)).c_str ());
8090aef2
PA
3130
3131 if (increment_pc != 0)
3132 {
3133 struct regcache *regcache
3134 = get_thread_regcache (current_thread, 1);
3135
3136 event_child->stop_pc += increment_pc;
bf9ae9d8 3137 low_set_pc (regcache, event_child->stop_pc);
8090aef2 3138
d7146cda 3139 if (!low_breakpoint_at (event_child->stop_pc))
15c66dd6 3140 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
8090aef2
PA
3141 }
3142 }
3143
6bf5e0ba
PA
3144 /* If this event was not handled before, and is not a SIGTRAP, we
3145 report it. SIGILL and SIGSEGV are also treated as traps in case
3146 a breakpoint is inserted at the current PC. If this target does
3147 not support internal breakpoints at all, we also report the
3148 SIGTRAP without further processing; it's of no concern to us. */
3149 maybe_internal_trap
bf9ae9d8 3150 = (low_supports_breakpoints ()
6bf5e0ba
PA
3151 && (WSTOPSIG (w) == SIGTRAP
3152 || ((WSTOPSIG (w) == SIGILL
3153 || WSTOPSIG (w) == SIGSEGV)
d7146cda 3154 && low_breakpoint_at (event_child->stop_pc))));
6bf5e0ba
PA
3155
3156 if (maybe_internal_trap)
3157 {
3158 /* Handle anything that requires bookkeeping before deciding to
3159 report the event or continue waiting. */
3160
3161 /* First check if we can explain the SIGTRAP with an internal
3162 breakpoint, or if we should possibly report the event to GDB.
3163 Do this before anything that may remove or insert a
3164 breakpoint. */
3165 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3166
3167 /* We have a SIGTRAP, possibly a step-over dance has just
3168 finished. If so, tweak the state machine accordingly,
3b9a79ef
YQ
3169 reinsert breakpoints and delete any single-step
3170 breakpoints. */
6bf5e0ba
PA
3171 step_over_finished = finish_step_over (event_child);
3172
3173 /* Now invoke the callbacks of any internal breakpoints there. */
3174 check_breakpoints (event_child->stop_pc);
3175
219f2f23
PA
3176 /* Handle tracepoint data collecting. This may overflow the
3177 trace buffer, and cause a tracing stop, removing
3178 breakpoints. */
3179 trace_event = handle_tracepoints (event_child);
3180
6bf5e0ba 3181 if (bp_explains_trap)
c058728c 3182 threads_debug_printf ("Hit a gdbserver breakpoint.");
6bf5e0ba
PA
3183 }
3184 else
3185 {
3186 /* We have some other signal, possibly a step-over dance was in
3187 progress, and it should be cancelled too. */
3188 step_over_finished = finish_step_over (event_child);
fa593d66
PA
3189 }
3190
3191 /* We have all the data we need. Either report the event to GDB, or
3192 resume threads and keep waiting for more. */
3193
3194 /* If we're collecting a fast tracepoint, finish the collection and
3195 move out of the jump pad before delivering a signal. See
3196 linux_stabilize_threads. */
3197
3198 if (WIFSTOPPED (w)
3199 && WSTOPSIG (w) != SIGTRAP
3200 && supports_fast_tracepoints ()
58b4daa5 3201 && agent_loaded_p ())
fa593d66 3202 {
c058728c
SM
3203 threads_debug_printf ("Got signal %d for LWP %ld. Check if we need "
3204 "to defer or adjust it.",
3205 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
3206
3207 /* Allow debugging the jump pad itself. */
0bfdf32f 3208 if (current_thread->last_resume_kind != resume_step
fa593d66
PA
3209 && maybe_move_out_of_jump_pad (event_child, &w))
3210 {
3211 enqueue_one_deferred_signal (event_child, &w);
3212
c058728c
SM
3213 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3214 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66 3215
df95181f 3216 resume_one_lwp (event_child, 0, 0, NULL);
582511be
PA
3217
3218 return ignore_event (ourstatus);
fa593d66
PA
3219 }
3220 }
219f2f23 3221
229d26fc
SM
3222 if (event_child->collecting_fast_tracepoint
3223 != fast_tpoint_collect_result::not_collecting)
fa593d66 3224 {
c058728c
SM
3225 threads_debug_printf
3226 ("LWP %ld was trying to move out of the jump pad (%d). "
3227 "Check if we're already there.",
3228 lwpid_of (current_thread),
3229 (int) event_child->collecting_fast_tracepoint);
fa593d66
PA
3230
3231 trace_event = 1;
3232
3233 event_child->collecting_fast_tracepoint
3234 = linux_fast_tracepoint_collecting (event_child, NULL);
3235
229d26fc
SM
3236 if (event_child->collecting_fast_tracepoint
3237 != fast_tpoint_collect_result::before_insn)
fa593d66
PA
3238 {
3239 /* No longer need this breakpoint. */
3240 if (event_child->exit_jump_pad_bkpt != NULL)
3241 {
c058728c
SM
3242 threads_debug_printf
3243 ("No longer need exit-jump-pad bkpt; removing it."
3244 "stopping all threads momentarily.");
fa593d66
PA
3245
3246 /* Other running threads could hit this breakpoint.
3247 We don't handle moribund locations like GDB does,
3248 instead we always pause all threads when removing
3249 breakpoints, so that any step-over or
3250 decr_pc_after_break adjustment is always taken
3251 care of while the breakpoint is still
3252 inserted. */
3253 stop_all_lwps (1, event_child);
fa593d66
PA
3254
3255 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3256 event_child->exit_jump_pad_bkpt = NULL;
3257
3258 unstop_all_lwps (1, event_child);
3259
3260 gdb_assert (event_child->suspended >= 0);
3261 }
3262 }
3263
229d26fc
SM
3264 if (event_child->collecting_fast_tracepoint
3265 == fast_tpoint_collect_result::not_collecting)
fa593d66 3266 {
c058728c
SM
3267 threads_debug_printf
3268 ("fast tracepoint finished collecting successfully.");
fa593d66
PA
3269
3270 /* We may have a deferred signal to report. */
3271 if (dequeue_one_deferred_signal (event_child, &w))
c058728c 3272 threads_debug_printf ("dequeued one signal.");
3c11dd79 3273 else
fa593d66 3274 {
c058728c 3275 threads_debug_printf ("no deferred signals.");
fa593d66
PA
3276
3277 if (stabilizing_threads)
3278 {
183be222 3279 ourstatus->set_stopped (GDB_SIGNAL_0);
87ce2a04 3280
c058728c
SM
3281 threads_debug_printf
3282 ("ret = %s, stopped while stabilizing threads",
3283 target_pid_to_str (ptid_of (current_thread)).c_str ());
87ce2a04 3284
0bfdf32f 3285 return ptid_of (current_thread);
fa593d66
PA
3286 }
3287 }
3288 }
6bf5e0ba
PA
3289 }
3290
e471f25b
PA
3291 /* Check whether GDB would be interested in this event. */
3292
82075af2
JS
3293 /* Check if GDB is interested in this syscall. */
3294 if (WIFSTOPPED (w)
3295 && WSTOPSIG (w) == SYSCALL_SIGTRAP
9eedd27d 3296 && !gdb_catch_this_syscall (event_child))
82075af2 3297 {
c058728c
SM
3298 threads_debug_printf ("Ignored syscall for LWP %ld.",
3299 lwpid_of (current_thread));
82075af2 3300
df95181f 3301 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
edeeb602 3302
82075af2
JS
3303 return ignore_event (ourstatus);
3304 }
3305
e471f25b
PA
3306 /* If GDB is not interested in this signal, don't stop other
3307 threads, and don't report it to GDB. Just resume the inferior
3308 right away. We do this for threading-related signals as well as
3309 any that GDB specifically requested we ignore. But never ignore
3310 SIGSTOP if we sent it ourselves, and do not ignore signals when
3311 stepping - they may require special handling to skip the signal
c9587f88
AT
3312 handler. Also never ignore signals that could be caused by a
3313 breakpoint. */
e471f25b 3314 if (WIFSTOPPED (w)
0bfdf32f 3315 && current_thread->last_resume_kind != resume_step
e471f25b 3316 && (
1a981360 3317#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
fe978cb0 3318 (current_process ()->priv->thread_db != NULL
e471f25b
PA
3319 && (WSTOPSIG (w) == __SIGRTMIN
3320 || WSTOPSIG (w) == __SIGRTMIN + 1))
3321 ||
3322#endif
c12a5089 3323 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
e471f25b 3324 && !(WSTOPSIG (w) == SIGSTOP
c9587f88
AT
3325 && current_thread->last_resume_kind == resume_stop)
3326 && !linux_wstatus_maybe_breakpoint (w))))
e471f25b
PA
3327 {
3328 siginfo_t info, *info_p;
3329
c058728c
SM
3330 threads_debug_printf ("Ignored signal %d for LWP %ld.",
3331 WSTOPSIG (w), lwpid_of (current_thread));
e471f25b 3332
0bfdf32f 3333 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 3334 (PTRACE_TYPE_ARG3) 0, &info) == 0)
e471f25b
PA
3335 info_p = &info;
3336 else
3337 info_p = NULL;
863d01bd
PA
3338
3339 if (step_over_finished)
3340 {
3341 /* We cancelled this thread's step-over above. We still
3342 need to unsuspend all other LWPs, and set them back
3343 running again while the signal handler runs. */
3344 unsuspend_all_lwps (event_child);
3345
3346 /* Enqueue the pending signal info so that proceed_all_lwps
3347 doesn't lose it. */
3348 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3349
3350 proceed_all_lwps ();
3351 }
3352 else
3353 {
df95181f
TBA
3354 resume_one_lwp (event_child, event_child->stepping,
3355 WSTOPSIG (w), info_p);
863d01bd 3356 }
edeeb602 3357
582511be 3358 return ignore_event (ourstatus);
e471f25b
PA
3359 }
3360
c2d6af84
PA
3361 /* Note that all addresses are always "out of the step range" when
3362 there's no range to begin with. */
3363 in_step_range = lwp_in_step_range (event_child);
3364
3365 /* If GDB wanted this thread to single step, and the thread is out
3366 of the step range, we always want to report the SIGTRAP, and let
3367 GDB handle it. Watchpoints should always be reported. So should
3368 signals we can't explain. A SIGTRAP we can't explain could be a
3369 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3370 do, we're be able to handle GDB breakpoints on top of internal
3371 breakpoints, by handling the internal breakpoint and still
3372 reporting the event to GDB. If we don't, we're out of luck, GDB
863d01bd
PA
3373 won't see the breakpoint hit. If we see a single-step event but
3374 the thread should be continuing, don't pass the trap to gdb.
3375 That indicates that we had previously finished a single-step but
3376 left the single-step pending -- see
3377 complete_ongoing_step_over. */
6bf5e0ba 3378 report_to_gdb = (!maybe_internal_trap
0bfdf32f 3379 || (current_thread->last_resume_kind == resume_step
c2d6af84 3380 && !in_step_range)
15c66dd6 3381 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
863d01bd
PA
3382 || (!in_step_range
3383 && !bp_explains_trap
3384 && !trace_event
3385 && !step_over_finished
3386 && !(current_thread->last_resume_kind == resume_continue
3387 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
9f3a5c85 3388 || (gdb_breakpoint_here (event_child->stop_pc)
d3ce09f5 3389 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
de0d863e 3390 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
183be222 3391 || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);
d3ce09f5
SS
3392
3393 run_breakpoint_commands (event_child->stop_pc);
6bf5e0ba
PA
3394
3395 /* We found no reason GDB would want us to stop. We either hit one
3396 of our own breakpoints, or finished an internal step GDB
3397 shouldn't know about. */
3398 if (!report_to_gdb)
3399 {
c058728c
SM
3400 if (bp_explains_trap)
3401 threads_debug_printf ("Hit a gdbserver breakpoint.");
3402
3403 if (step_over_finished)
3404 threads_debug_printf ("Step-over finished.");
3405
3406 if (trace_event)
3407 threads_debug_printf ("Tracepoint event.");
3408
3409 if (lwp_in_step_range (event_child))
3410 threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
3411 paddress (event_child->stop_pc),
3412 paddress (event_child->step_range_start),
3413 paddress (event_child->step_range_end));
6bf5e0ba
PA
3414
3415 /* We're not reporting this breakpoint to GDB, so apply the
3416 decr_pc_after_break adjustment to the inferior's regcache
3417 ourselves. */
3418
bf9ae9d8 3419 if (low_supports_breakpoints ())
6bf5e0ba
PA
3420 {
3421 struct regcache *regcache
0bfdf32f 3422 = get_thread_regcache (current_thread, 1);
bf9ae9d8 3423 low_set_pc (regcache, event_child->stop_pc);
6bf5e0ba
PA
3424 }
3425
7984d532 3426 if (step_over_finished)
e3652c84
YQ
3427 {
3428 /* If we have finished stepping over a breakpoint, we've
3429 stopped and suspended all LWPs momentarily except the
3430 stepping one. This is where we resume them all again.
3431 We're going to keep waiting, so use proceed, which
3432 handles stepping over the next breakpoint. */
3433 unsuspend_all_lwps (event_child);
3434 }
3435 else
3436 {
3437 /* Remove the single-step breakpoints if any. Note that
3438 there isn't single-step breakpoint if we finished stepping
3439 over. */
7582c77c 3440 if (supports_software_single_step ()
e3652c84
YQ
3441 && has_single_step_breakpoints (current_thread))
3442 {
3443 stop_all_lwps (0, event_child);
3444 delete_single_step_breakpoints (current_thread);
3445 unstop_all_lwps (0, event_child);
3446 }
3447 }
7984d532 3448
c058728c 3449 threads_debug_printf ("proceeding all threads.");
edeeb602 3450
c058728c 3451 proceed_all_lwps ();
edeeb602 3452
582511be 3453 return ignore_event (ourstatus);
6bf5e0ba
PA
3454 }
3455
c058728c
SM
3456 if (debug_threads)
3457 {
3458 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3459 threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
3460 lwpid_of (get_lwp_thread (event_child)),
3461 event_child->waitstatus.to_string ().c_str ());
3462
3463 if (current_thread->last_resume_kind == resume_step)
3464 {
3465 if (event_child->step_range_start == event_child->step_range_end)
3466 threads_debug_printf
3467 ("GDB wanted to single-step, reporting event.");
3468 else if (!lwp_in_step_range (event_child))
3469 threads_debug_printf ("Out of step range, reporting event.");
3470 }
3471
3472 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3473 threads_debug_printf ("Stopped by watchpoint.");
3474 else if (gdb_breakpoint_here (event_child->stop_pc))
3475 threads_debug_printf ("Stopped by GDB breakpoint.");
3476 }
3477
3478 threads_debug_printf ("Hit a non-gdbserver trap event.");
6bf5e0ba
PA
3479
3480 /* Alright, we're going to report a stop. */
3481
3b9a79ef 3482 /* Remove single-step breakpoints. */
7582c77c 3483 if (supports_software_single_step ())
8901d193 3484 {
3b9a79ef 3485 /* Remove single-step breakpoints or not. It it is true, stop all
8901d193
YQ
3486 lwps, so that other threads won't hit the breakpoint in the
3487 staled memory. */
3b9a79ef 3488 int remove_single_step_breakpoints_p = 0;
8901d193
YQ
3489
3490 if (non_stop)
3491 {
3b9a79ef
YQ
3492 remove_single_step_breakpoints_p
3493 = has_single_step_breakpoints (current_thread);
8901d193
YQ
3494 }
3495 else
3496 {
3497 /* In all-stop, a stop reply cancels all previous resume
3b9a79ef 3498 requests. Delete all single-step breakpoints. */
8901d193 3499
9c80ecd6
SM
3500 find_thread ([&] (thread_info *thread) {
3501 if (has_single_step_breakpoints (thread))
3502 {
3503 remove_single_step_breakpoints_p = 1;
3504 return true;
3505 }
8901d193 3506
9c80ecd6
SM
3507 return false;
3508 });
8901d193
YQ
3509 }
3510
3b9a79ef 3511 if (remove_single_step_breakpoints_p)
8901d193 3512 {
3b9a79ef 3513 /* If we remove single-step breakpoints from memory, stop all lwps,
8901d193
YQ
3514 so that other threads won't hit the breakpoint in the staled
3515 memory. */
3516 stop_all_lwps (0, event_child);
3517
3518 if (non_stop)
3519 {
3b9a79ef
YQ
3520 gdb_assert (has_single_step_breakpoints (current_thread));
3521 delete_single_step_breakpoints (current_thread);
8901d193
YQ
3522 }
3523 else
3524 {
9c80ecd6
SM
3525 for_each_thread ([] (thread_info *thread){
3526 if (has_single_step_breakpoints (thread))
3527 delete_single_step_breakpoints (thread);
3528 });
8901d193
YQ
3529 }
3530
3531 unstop_all_lwps (0, event_child);
3532 }
3533 }
3534
582511be 3535 if (!stabilizing_threads)
6bf5e0ba
PA
3536 {
3537 /* In all-stop, stop all threads. */
582511be
PA
3538 if (!non_stop)
3539 stop_all_lwps (0, NULL);
6bf5e0ba 3540
c03e6ccc 3541 if (step_over_finished)
582511be
PA
3542 {
3543 if (!non_stop)
3544 {
3545 /* If we were doing a step-over, all other threads but
3546 the stepping one had been paused in start_step_over,
3547 with their suspend counts incremented. We don't want
3548 to do a full unstop/unpause, because we're in
3549 all-stop mode (so we want threads stopped), but we
3550 still need to unsuspend the other threads, to
3551 decrement their `suspended' count back. */
3552 unsuspend_all_lwps (event_child);
3553 }
3554 else
3555 {
3556 /* If we just finished a step-over, then all threads had
3557 been momentarily paused. In all-stop, that's fine,
3558 we want threads stopped by now anyway. In non-stop,
3559 we need to re-resume threads that GDB wanted to be
3560 running. */
3561 unstop_all_lwps (1, event_child);
3562 }
3563 }
c03e6ccc 3564
3aa5cfa0
AT
3565 /* If we're not waiting for a specific LWP, choose an event LWP
3566 from among those that have had events. Giving equal priority
3567 to all LWPs that have had events helps prevent
3568 starvation. */
d7e15655 3569 if (ptid == minus_one_ptid)
3aa5cfa0
AT
3570 {
3571 event_child->status_pending_p = 1;
3572 event_child->status_pending = w;
3573
3574 select_event_lwp (&event_child);
3575
3576 /* current_thread and event_child must stay in sync. */
24583e45 3577 switch_to_thread (get_lwp_thread (event_child));
3aa5cfa0
AT
3578
3579 event_child->status_pending_p = 0;
3580 w = event_child->status_pending;
3581 }
3582
3583
fa593d66 3584 /* Stabilize threads (move out of jump pads). */
582511be 3585 if (!non_stop)
5c9eb2f2 3586 target_stabilize_threads ();
6bf5e0ba
PA
3587 }
3588 else
3589 {
3590 /* If we just finished a step-over, then all threads had been
3591 momentarily paused. In all-stop, that's fine, we want
3592 threads stopped by now anyway. In non-stop, we need to
3593 re-resume threads that GDB wanted to be running. */
3594 if (step_over_finished)
7984d532 3595 unstop_all_lwps (1, event_child);
6bf5e0ba
PA
3596 }
3597
e88cf517
SM
3598 /* At this point, we haven't set OURSTATUS. This is where we do it. */
3599 gdb_assert (ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3600
183be222 3601 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
de0d863e 3602 {
393a6b59
PA
3603 /* If the reported event is an exit, fork, vfork, clone or exec,
3604 let GDB know. */
5a04c4cf 3605
393a6b59
PA
3606 /* Break the unreported fork/vfork/clone relationship chain. */
3607 if (is_new_child_status (event_child->waitstatus.kind ()))
5a04c4cf 3608 {
393a6b59
PA
3609 event_child->relative->relative = NULL;
3610 event_child->relative = NULL;
5a04c4cf
PA
3611 }
3612
00db26fa 3613 *ourstatus = event_child->waitstatus;
de0d863e 3614 /* Clear the event lwp's waitstatus since we handled it already. */
183be222 3615 event_child->waitstatus.set_ignore ();
de0d863e
DB
3616 }
3617 else
183be222 3618 {
e88cf517 3619 /* The LWP stopped due to a plain signal or a syscall signal. Either way,
3bfdcabb 3620 event_child->waitstatus wasn't filled in with the details, so look at
e88cf517
SM
3621 the wait status W. */
3622 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3623 {
3624 int syscall_number;
3625
3626 get_syscall_trapinfo (event_child, &syscall_number);
3627 if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
3628 ourstatus->set_syscall_entry (syscall_number);
3629 else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
3630 ourstatus->set_syscall_return (syscall_number);
3631 else
3632 gdb_assert_not_reached ("unexpected syscall state");
3633 }
3634 else if (current_thread->last_resume_kind == resume_stop
3635 && WSTOPSIG (w) == SIGSTOP)
3636 {
3637 /* A thread that has been requested to stop by GDB with vCont;t,
3638 and it stopped cleanly, so report as SIG0. The use of
3639 SIGSTOP is an implementation detail. */
3640 ourstatus->set_stopped (GDB_SIGNAL_0);
3641 }
3642 else
3643 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
183be222 3644 }
5b1c542e 3645
582511be 3646 /* Now that we've selected our final event LWP, un-adjust its PC if
3e572f71
PA
3647 it was a software breakpoint, and the client doesn't know we can
3648 adjust the breakpoint ourselves. */
3649 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
c12a5089 3650 && !cs.swbreak_feature)
582511be 3651 {
d4807ea2 3652 int decr_pc = low_decr_pc_after_break ();
582511be
PA
3653
3654 if (decr_pc != 0)
3655 {
3656 struct regcache *regcache
3657 = get_thread_regcache (current_thread, 1);
bf9ae9d8 3658 low_set_pc (regcache, event_child->stop_pc + decr_pc);
582511be
PA
3659 }
3660 }
3661
d7e15655 3662 gdb_assert (step_over_bkpt == null_ptid);
d50171e4 3663
e48359ea 3664 threads_debug_printf ("ret = %s, %s",
c058728c 3665 target_pid_to_str (ptid_of (current_thread)).c_str (),
e48359ea 3666 ourstatus->to_string ().c_str ());
bd99dc85 3667
48989498 3668 return filter_exit_event (event_child, ourstatus);
bd99dc85
PA
3669}
3670
/* Get rid of any pending event in the pipe.  */
static void
async_file_flush (void)
{
  /* Drain the event pipe so a stale wakeup token does not make the
     event loop spin before a fresh event has been reported.  */
  linux_event_pipe.flush ();
}
3677
/* Put something in the pipe, so the event loop wakes up.  */
static void
async_file_mark (void)
{
  /* Writing a token makes the pipe's read end readable, which wakes
     the event loop's file-descriptor wait.  */
  linux_event_pipe.mark ();
}
3684
6532e7e3
TBA
3685ptid_t
3686linux_process_target::wait (ptid_t ptid,
3687 target_waitstatus *ourstatus,
b60cea74 3688 target_wait_flags target_options)
bd99dc85 3689{
95954743 3690 ptid_t event_ptid;
bd99dc85 3691
bd99dc85
PA
3692 /* Flush the async file first. */
3693 if (target_is_async_p ())
3694 async_file_flush ();
3695
582511be
PA
3696 do
3697 {
d16f3f6c 3698 event_ptid = wait_1 (ptid, ourstatus, target_options);
582511be
PA
3699 }
3700 while ((target_options & TARGET_WNOHANG) == 0
183be222 3701 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3702
3703 /* If at least one stop was reported, there may be more. A single
3704 SIGCHLD can signal more than one child stop. */
3705 if (target_is_async_p ()
3706 && (target_options & TARGET_WNOHANG) != 0
d7e15655 3707 && event_ptid != null_ptid)
bd99dc85
PA
3708 async_file_mark ();
3709
3710 return event_ptid;
da6d8c04
DJ
3711}
3712
c5f62d5f 3713/* Send a signal to an LWP. */
fd500816
DJ
3714
3715static int
a1928bad 3716kill_lwp (unsigned long lwpid, int signo)
fd500816 3717{
4a6ed09b 3718 int ret;
fd500816 3719
4a6ed09b
PA
3720 errno = 0;
3721 ret = syscall (__NR_tkill, lwpid, signo);
3722 if (errno == ENOSYS)
3723 {
3724 /* If tkill fails, then we are not using nptl threads, a
3725 configuration we no longer support. */
3726 perror_with_name (("tkill"));
3727 }
3728 return ret;
fd500816
DJ
3729}
3730
/* Ask LWP to stop by queueing a SIGSTOP for it (see send_sigstop).
   Non-static; presumably the entry point used by shared linux-nat
   code -- TODO confirm against the header that declares it.  */
void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3736
0d62e5e8 3737static void
02fc4de7 3738send_sigstop (struct lwp_info *lwp)
0d62e5e8 3739{
bd99dc85 3740 int pid;
0d62e5e8 3741
d86d4aaf 3742 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3743
0d62e5e8
DJ
3744 /* If we already have a pending stop signal for this process, don't
3745 send another. */
54a0b537 3746 if (lwp->stop_expected)
0d62e5e8 3747 {
c058728c 3748 threads_debug_printf ("Have pending sigstop for lwp %d", pid);
ae13219e 3749
0d62e5e8
DJ
3750 return;
3751 }
3752
c058728c 3753 threads_debug_printf ("Sending sigstop to lwp %d", pid);
0d62e5e8 3754
d50171e4 3755 lwp->stop_expected = 1;
bd99dc85 3756 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3757}
3758
df3e4dbe
SM
3759static void
3760send_sigstop (thread_info *thread, lwp_info *except)
02fc4de7 3761{
d86d4aaf 3762 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3763
7984d532
PA
3764 /* Ignore EXCEPT. */
3765 if (lwp == except)
df3e4dbe 3766 return;
7984d532 3767
02fc4de7 3768 if (lwp->stopped)
df3e4dbe 3769 return;
02fc4de7
PA
3770
3771 send_sigstop (lwp);
7984d532
PA
3772}
3773
3774/* Increment the suspend count of an LWP, and stop it, if not stopped
3775 yet. */
df3e4dbe
SM
3776static void
3777suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
7984d532 3778{
d86d4aaf 3779 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3780
3781 /* Ignore EXCEPT. */
3782 if (lwp == except)
df3e4dbe 3783 return;
7984d532 3784
863d01bd 3785 lwp_suspended_inc (lwp);
7984d532 3786
df3e4dbe 3787 send_sigstop (thread, except);
02fc4de7
PA
3788}
3789
/* Mark LWP dead, with WSTAT as exit status pending to report later.
   If THREAD_EVENT is true, interpret WSTAT as a thread exit event
   instead of a process exit event.  This is meaningful for the leader
   thread, as we normally report a process-wide exit event when we see
   the leader exit, and a thread exit event when we see any other
   thread exit.  */

static void
mark_lwp_dead (struct lwp_info *lwp, int wstat, bool thread_event)
{
  /* Store the exit status for later.  */
  lwp->status_pending_p = 1;
  lwp->status_pending = wstat;

  /* Store in waitstatus as well, as there's nothing else to process
     for this event.  */
  if (WIFEXITED (wstat))
    {
      /* Normal exit: report per-thread or process-wide depending on
	 THREAD_EVENT (see function comment).  */
      if (thread_event)
	lwp->waitstatus.set_thread_exited (WEXITSTATUS (wstat));
      else
	lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
    }
  else if (WIFSIGNALED (wstat))
    {
      /* Killed by a signal; a signalled death is always reported as a
	 process event, never a thread event.  */
      gdb_assert (!thread_event);
      lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
    }
  else
    gdb_assert_not_reached ("unknown status kind");

  /* Prevent trying to stop it.  */
  lwp->stopped = 1;

  /* No further stops are expected from a dead lwp.  */
  lwp->stop_expected = 0;
}
3827
00db26fa
PA
3828/* Return true if LWP has exited already, and has a pending exit event
3829 to report to GDB. */
3830
3831static int
3832lwp_is_marked_dead (struct lwp_info *lwp)
3833{
3834 return (lwp->status_pending_p
3835 && (WIFEXITED (lwp->status_pending)
3836 || WIFSIGNALED (lwp->status_pending)));
3837}
3838
/* Pull all pending stop events out of the kernel, leaving them queued
   on their LWPs, and restore the previously current thread afterwards
   (unless it died in the meantime, in which case no thread is left
   current).  */

void
linux_process_target::wait_for_sigstop ()
{
  struct thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  /* Remember which thread was current before draining events, so we
     can tell whether it survived.  */
  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  scoped_restore_current_thread restore_thread;

  threads_debug_printf ("pulling events");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
  gdb_assert (ret == -1);

  if (saved_thread == NULL || mythread_alive (saved_tid))
    return;
  else
    {
      threads_debug_printf ("Previously current thread died.");

      /* We can't change the current inferior behind GDB's back,
	 otherwise, a subsequent command may apply to the wrong
	 process.  */
      restore_thread.dont_restore ();
      switch_to_thread (nullptr);
    }
}
3876
/* Return true if THREAD is stopped someplace the user would want to
   debug (a GDB breakpoint, a watchpoint stop, or a single-step
   request) while a fast tracepoint collect is in progress -- i.e. it
   is "stuck" in the jump pad and needs stabilizing before the stop
   can be reported.  */

bool
linux_process_target::stuck_in_jump_pad (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* Callers must not have suspended this LWP.  */
  if (lwp->suspended != 0)
    {
      internal_error ("LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* Allow debugging the jump pad, gdb_collect, etc..  */
  return (supports_fast_tracepoints ()
	  && agent_loaded_p ()
	  && (gdb_breakpoint_here (lwp->stop_pc)
	      || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
	      || thread->last_resume_kind == resume_step)
	  && (linux_fast_tracepoint_collecting (lwp, NULL)
	      != fast_tpoint_collect_result::not_collecting));
}
3898
/* If THREAD stopped inside a fast tracepoint jump pad for an
   uninteresting reason, defer its pending signal (if any) and resume
   it so it runs clear of the pad; otherwise keep it stopped by
   bumping its suspend count.  */

void
linux_process_target::move_out_of_jump_pad (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int *wstat;

  /* Callers must not have suspended this LWP.  */
  if (lwp->suspended != 0)
    {
      internal_error ("LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* For gdb_breakpoint_here.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
			    lwpid_of (thread));

      if (wstat)
	{
	  /* Park the pending signal; it will be re-delivered once the
	     LWP is out of the pad.  */
	  lwp->status_pending_p = 0;
	  enqueue_one_deferred_signal (lwp, wstat);

	  /* NOTE(review): message below is missing its closing paren
	     -- cosmetic only, left as-is.  */
	  threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad",
				WSTOPSIG (*wstat), lwpid_of (thread));
	}

      resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    lwp_suspended_inc (lwp);
}
3941
5a6b0a41
SM
3942static bool
3943lwp_running (thread_info *thread)
fa593d66 3944{
d86d4aaf 3945 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3946
00db26fa 3947 if (lwp_is_marked_dead (lwp))
5a6b0a41
SM
3948 return false;
3949
3950 return !lwp->stopped;
fa593d66
PA
3951}
3952
d16f3f6c
TBA
3953void
3954linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
0d62e5e8 3955{
bde24c0a
PA
3956 /* Should not be called recursively. */
3957 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3958
c058728c
SM
3959 THREADS_SCOPED_DEBUG_ENTER_EXIT;
3960
3961 threads_debug_printf
3962 ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
3963 (except != NULL
3964 ? target_pid_to_str (ptid_of (get_lwp_thread (except))).c_str ()
3965 : "none"));
87ce2a04 3966
bde24c0a
PA
3967 stopping_threads = (suspend
3968 ? STOPPING_AND_SUSPENDING_THREADS
3969 : STOPPING_THREADS);
7984d532
PA
3970
3971 if (suspend)
df3e4dbe
SM
3972 for_each_thread ([&] (thread_info *thread)
3973 {
3974 suspend_and_send_sigstop (thread, except);
3975 });
7984d532 3976 else
df3e4dbe
SM
3977 for_each_thread ([&] (thread_info *thread)
3978 {
3979 send_sigstop (thread, except);
3980 });
3981
fa96cb38 3982 wait_for_sigstop ();
bde24c0a 3983 stopping_threads = NOT_STOPPING_THREADS;
87ce2a04 3984
c058728c 3985 threads_debug_printf ("setting stopping_threads back to !stopping");
0d62e5e8
DJ
3986}
3987
863d01bd
PA
3988/* Enqueue one signal in the chain of signals which need to be
3989 delivered to this process on next resume. */
3990
3991static void
3992enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3993{
013e3554
TBA
3994 lwp->pending_signals.emplace_back (signal);
3995 if (info == nullptr)
3996 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
863d01bd 3997 else
013e3554 3998 lwp->pending_signals.back ().info = *info;
863d01bd
PA
3999}
4000
df95181f
TBA
4001void
4002linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
fa5308bd 4003{
984a2c04
YQ
4004 struct thread_info *thread = get_lwp_thread (lwp);
4005 struct regcache *regcache = get_thread_regcache (thread, 1);
8ce47547 4006
24583e45 4007 scoped_restore_current_thread restore_thread;
984a2c04 4008
24583e45 4009 switch_to_thread (thread);
7582c77c 4010 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
fa5308bd 4011
a0ff9e1a 4012 for (CORE_ADDR pc : next_pcs)
3b9a79ef 4013 set_single_step_breakpoint (pc, current_ptid);
fa5308bd
AT
4014}
4015
df95181f
TBA
4016int
4017linux_process_target::single_step (lwp_info* lwp)
7fe5e27e
AT
4018{
4019 int step = 0;
4020
b31cdfa6 4021 if (supports_hardware_single_step ())
7fe5e27e
AT
4022 {
4023 step = 1;
4024 }
7582c77c 4025 else if (supports_software_single_step ())
7fe5e27e
AT
4026 {
4027 install_software_single_step_breakpoints (lwp);
4028 step = 0;
4029 }
4030 else
c058728c 4031 threads_debug_printf ("stepping is not implemented on this target");
7fe5e27e
AT
4032
4033 return step;
4034}
4035
35ac8b3e 4036/* The signal can be delivered to the inferior if we are not trying to
5b061e98
YQ
4037 finish a fast tracepoint collect. Since signal can be delivered in
4038 the step-over, the program may go to signal handler and trap again
4039 after return from the signal handler. We can live with the spurious
4040 double traps. */
35ac8b3e
YQ
4041
4042static int
4043lwp_signal_can_be_delivered (struct lwp_info *lwp)
4044{
229d26fc
SM
4045 return (lwp->collecting_fast_tracepoint
4046 == fast_tpoint_collect_result::not_collecting);
35ac8b3e
YQ
4047}
4048
/* Resume execution of LWP.  If STEP is nonzero, single-step it.  If
   SIGNAL is nonzero, deliver that signal (with extra detail INFO when
   non-NULL).  May instead enqueue the signal, or bail out entirely if
   the LWP has a pending status.  Throws on ptrace failure (via
   perror_with_name); resume_one_lwp is the non-throwing wrapper.  */

void
linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
					    int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int ptrace_request;
  struct process_info *proc = get_thread_process (thread);

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  /* Nothing to do for an LWP that is already running.  */
  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || !lwp->pending_signals.empty ()
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  /* A pending status must be reported before the LWP can run again.  */
  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Not resuming lwp %ld (%s, stop %s); has pending status",
	 lwpid_of (thread), step ? "step" : "continue",
	 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      threads_debug_printf ("  pending reinsert at 0x%s",
			    paddress (lwp->bp_reinsert));

      if (supports_hardware_single_step ())
	{
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    threads_debug_printf
      ("lwp %ld wants to get out of fast tracepoint jump pad "
       "(exit-jump-pad-bkpt)", lwpid_of (thread));

  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      threads_debug_printf
	("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
	 lwpid_of (thread));

      if (supports_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error ("moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      threads_debug_printf
	("lwp %ld has a while-stepping action -> forcing step.",
	 lwpid_of (thread));

      step = single_step (lwp);
    }

  /* Record the pre-resume PC (needs registers, hence the tdesc
     guard; see the comment at the top of this function).  */
  if (proc->tdesc != NULL && low_supports_breakpoints ())
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      lwp->stop_pc = low_get_pc (regcache);

      threads_debug_printf ("  %s from pc 0x%lx", step ? "step" : "continue",
			    (long) lwp->stop_pc);
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
    {
      const pending_signal &p_sig = lwp->pending_signals.front ();

      signal = p_sig.signal;
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals.pop_front ();
    }

  threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
			lwpid_of (thread), step ? "step" : "continue", signal,
			lwp->stop_expected ? "expected" : "not expected");

  low_prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  /* Pick the ptrace resume request: single-step, stop at syscalls, or
     plain continue.  */
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  if (errno)
    {
      int saved_errno = errno;

      threads_debug_printf ("ptrace errno = %d (%s)",
			    saved_errno, strerror (saved_errno));

      errno = saved_errno;
      perror_with_name ("resuming thread");
    }

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
4244
/* Arch-specific hook run just before an LWP is resumed.  The base
   implementation does nothing; presumably architecture backends with
   per-resume state (e.g. debug registers) override it -- TODO confirm
   against the low target files.  */
void
linux_process_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* Nop.  */
}
4250
23f238d3
PA
4251/* Called when we try to resume a stopped LWP and that errors out. If
4252 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4253 or about to become), discard the error, clear any pending status
4254 the LWP may have, and return true (we'll collect the exit status
4255 soon enough). Otherwise, return false. */
4256
4257static int
4258check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4259{
4260 struct thread_info *thread = get_lwp_thread (lp);
4261
4262 /* If we get an error after resuming the LWP successfully, we'd
4263 confuse !T state for the LWP being gone. */
4264 gdb_assert (lp->stopped);
4265
4266 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4267 because even if ptrace failed with ESRCH, the tracee may be "not
4268 yet fully dead", but already refusing ptrace requests. In that
4269 case the tracee has 'R (Running)' state for a little bit
4270 (observed in Linux 3.18). See also the note on ESRCH in the
4271 ptrace(2) man page. Instead, check whether the LWP has any state
4272 other than ptrace-stopped. */
4273
4274 /* Don't assume anything if /proc/PID/status can't be read. */
4275 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 4276 {
23f238d3
PA
4277 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4278 lp->status_pending_p = 0;
4279 return 1;
4280 }
4281 return 0;
4282}
4283
df95181f
TBA
4284void
4285linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4286 siginfo_t *info)
23f238d3 4287{
a70b8144 4288 try
23f238d3 4289 {
df95181f 4290 resume_one_lwp_throw (lwp, step, signal, info);
23f238d3 4291 }
230d2906 4292 catch (const gdb_exception_error &ex)
23f238d3 4293 {
20471e00
SM
4294 if (check_ptrace_stopped_lwp_gone (lwp))
4295 {
4296 /* This could because we tried to resume an LWP after its leader
4297 exited. Mark it as resumed, so we can collect an exit event
4298 from it. */
4299 lwp->stopped = 0;
4300 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4301 }
4302 else
eedc3f4f 4303 throw;
3221518c 4304 }
da6d8c04
DJ
4305}
4306
5fdda392
SM
4307/* This function is called once per thread via for_each_thread.
4308 We look up which resume request applies to THREAD and mark it with a
4309 pointer to the appropriate resume request.
5544ad89
DJ
4310
4311 This algorithm is O(threads * resume elements), but resume elements
4312 is small (and will remain small at least until GDB supports thread
4313 suspension). */
ebcf782c 4314
5fdda392
SM
4315static void
4316linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
0d62e5e8 4317{
d86d4aaf 4318 struct lwp_info *lwp = get_thread_lwp (thread);
64386c31 4319
5fdda392 4320 for (int ndx = 0; ndx < n; ndx++)
95954743 4321 {
5fdda392 4322 ptid_t ptid = resume[ndx].thread;
d7e15655 4323 if (ptid == minus_one_ptid
9c80ecd6 4324 || ptid == thread->id
0c9070b3
YQ
4325 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4326 of PID'. */
e99b03dc 4327 || (ptid.pid () == pid_of (thread)
0e998d96 4328 && (ptid.is_pid ()
e38504b3 4329 || ptid.lwp () == -1)))
95954743 4330 {
5fdda392 4331 if (resume[ndx].kind == resume_stop
8336d594 4332 && thread->last_resume_kind == resume_stop)
d50171e4 4333 {
c058728c
SM
4334 threads_debug_printf
4335 ("already %s LWP %ld at GDB's request",
4336 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
4337 ? "stopped" : "stopping"),
4338 lwpid_of (thread));
d50171e4
PA
4339
4340 continue;
4341 }
4342
5a04c4cf
PA
4343 /* Ignore (wildcard) resume requests for already-resumed
4344 threads. */
5fdda392 4345 if (resume[ndx].kind != resume_stop
5a04c4cf
PA
4346 && thread->last_resume_kind != resume_stop)
4347 {
c058728c
SM
4348 threads_debug_printf
4349 ("already %s LWP %ld at GDB's request",
4350 (thread->last_resume_kind == resume_step
4351 ? "stepping" : "continuing"),
4352 lwpid_of (thread));
5a04c4cf
PA
4353 continue;
4354 }
4355
393a6b59
PA
4356 /* Don't let wildcard resumes resume fork/vfork/clone
4357 children that GDB does not yet know are new children. */
4358 if (lwp->relative != NULL)
5a04c4cf 4359 {
393a6b59 4360 struct lwp_info *rel = lwp->relative;
5a04c4cf
PA
4361
4362 if (rel->status_pending_p
393a6b59 4363 && is_new_child_status (rel->waitstatus.kind ()))
5a04c4cf 4364 {
c058728c
SM
4365 threads_debug_printf
4366 ("not resuming LWP %ld: has queued stop reply",
4367 lwpid_of (thread));
5a04c4cf
PA
4368 continue;
4369 }
4370 }
4371
4372 /* If the thread has a pending event that has already been
4373 reported to GDBserver core, but GDB has not pulled the
4374 event out of the vStopped queue yet, likewise, ignore the
4375 (wildcard) resume request. */
9c80ecd6 4376 if (in_queued_stop_replies (thread->id))
5a04c4cf 4377 {
c058728c
SM
4378 threads_debug_printf
4379 ("not resuming LWP %ld: has queued stop reply",
4380 lwpid_of (thread));
5a04c4cf
PA
4381 continue;
4382 }
4383
5fdda392 4384 lwp->resume = &resume[ndx];
8336d594 4385 thread->last_resume_kind = lwp->resume->kind;
fa593d66 4386
c2d6af84
PA
4387 lwp->step_range_start = lwp->resume->step_range_start;
4388 lwp->step_range_end = lwp->resume->step_range_end;
4389
fa593d66
PA
4390 /* If we had a deferred signal to report, dequeue one now.
4391 This can happen if LWP gets more than one signal while
4392 trying to get out of a jump pad. */
4393 if (lwp->stopped
4394 && !lwp->status_pending_p
4395 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4396 {
4397 lwp->status_pending_p = 1;
4398
c058728c
SM
4399 threads_debug_printf
4400 ("Dequeueing deferred signal %d for LWP %ld, "
4401 "leaving status pending.",
4402 WSTOPSIG (lwp->status_pending),
4403 lwpid_of (thread));
fa593d66
PA
4404 }
4405
5fdda392 4406 return;
95954743
PA
4407 }
4408 }
2bd7c093
PA
4409
4410 /* No resume action for this thread. */
4411 lwp->resume = NULL;
5544ad89
DJ
4412}
4413
df95181f
TBA
4414bool
4415linux_process_target::resume_status_pending (thread_info *thread)
5544ad89 4416{
d86d4aaf 4417 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4418
bd99dc85
PA
4419 /* LWPs which will not be resumed are not interesting, because
4420 we might not wait for them next time through linux_wait. */
2bd7c093 4421 if (lwp->resume == NULL)
25c28b4d 4422 return false;
64386c31 4423
df95181f 4424 return thread_still_has_status_pending (thread);
d50171e4
PA
4425}
4426
/* Return true if THREAD is stopped on top of a breakpoint (or fast
   tracepoint jump) that must be stepped over before it can be
   resumed; false otherwise (including every case where stepping over
   would be wrong or unnecessary -- see the individual guards).  */

bool
linux_process_target::thread_needs_step_over (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
			    lwpid_of (thread));
      return false;
    }

  if (thread->last_resume_kind == resume_stop)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, should remain stopped",
	 lwpid_of (thread));
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
			    lwpid_of (thread));
      return false;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending status.",
	 lwpid_of (thread));
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Cancelling, PC was changed.  "
	 "Old stop_pc was 0x%s, PC is now 0x%s", lwpid_of (thread),
	 paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (supports_software_single_step ()
      && !lwp->pending_signals.empty ()
      && lwp_signal_can_be_delivered (lwp))
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending signals.",
	 lwpid_of (thread));

      return false;
    }

  /* The breakpoint queries below act on the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
				" GDB breakpoint at 0x%s; skipping step over",
				lwpid_of (thread), paddress (pc));

	  return false;
	}
      else
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, "
				"found breakpoint at 0x%s",
				lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  return true;
	}
    }

  threads_debug_printf
    ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
     lwpid_of (thread), paddress (pc));

  return false;
}
4542
/* Begin stepping LWP over the breakpoint it is stopped at: stop all
   other LWPs, remove the breakpoint (and any fast tracepoint jump) at
   the current PC, and resume LWP for a single step.  finish_step_over
   reinserts the breakpoint once the step completes.  */

void
linux_process_target::start_step_over (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  CORE_ADDR pc;

  threads_debug_printf ("Starting step-over on LWP %ld.  Stopping all threads",
			lwpid_of (thread));

  stop_all_lwps (1, lwp);

  /* A suspended LWP here would indicate unbalanced suspend/unsuspend
     bookkeeping; refuse to continue.  */
  if (lwp->suspended != 0)
    {
      internal_error ("LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  threads_debug_printf ("Done stopping all threads for step-over.");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  bool step = false;
  {
    /* Breakpoint manipulation and single_step act on the current
       thread; temporarily switch to LWP's thread.  */
    scoped_restore_current_thread restore_thread;
    switch_to_thread (thread);

    /* Record where the breakpoint must be put back.  */
    lwp->bp_reinsert = pc;
    uninsert_breakpoints_at (pc);
    uninsert_fast_tracepoint_jumps_at (pc);

    step = single_step (lwp);
  }

  resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->id;
}
4585
/* Finish a step-over previously started by start_step_over:
   reinsert the breakpoint that was removed at LWP->bp_reinsert and
   clear the step-over state.  Returns true if a step-over was in
   fact in progress (bp_reinsert was set), false otherwise.  */

bool
linux_process_target::finish_step_over (lwp_info *lwp)
{
  if (lwp->bp_reinsert != 0)
    {
      scoped_restore_current_thread restore_thread;

      threads_debug_printf ("Finished step over.");

      switch_to_thread (get_lwp_thread (lwp));

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
	 may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any single-step breakpoints.  No longer needed.  We
	 don't have to worry about other threads hitting this trap,
	 and later not being able to explain it, because we were
	 stepping over a breakpoint, and we hold all threads but
	 LWP stopped while doing that.  */
      if (!supports_hardware_single_step ())
	{
	  gdb_assert (has_single_step_breakpoints (current_thread));
	  delete_single_step_breakpoints (current_thread);
	}

      step_over_bkpt = null_ptid;
      return true;
    }
  else
    return false;
}
4621
/* If a step-over is in progress (step_over_bkpt set), wait for it to
   complete and clean up, discarding the step SIGTRAP when GDB did not
   ask for a step.  Used on the detach path, per the debug messages.  */

void
linux_process_target::complete_ongoing_step_over ()
{
  if (step_over_bkpt != null_ptid)
    {
      struct lwp_info *lwp;
      int wstat;
      int ret;

      threads_debug_printf ("detach: step over in progress, finish it first");

      /* Passing NULL_PTID as filter indicates we want all events to
	 be left pending.  Eventually this returns when there are no
	 unwaited-for children left.  */
      ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
				     __WALL);
      gdb_assert (ret == -1);

      lwp = find_lwp_pid (step_over_bkpt);
      if (lwp != NULL)
	{
	  finish_step_over (lwp);

	  /* If we got our step SIGTRAP, don't leave it pending,
	     otherwise we would report it to GDB as a spurious
	     SIGTRAP.  */
	  gdb_assert (lwp->status_pending_p);
	  if (WIFSTOPPED (lwp->status_pending)
	      && WSTOPSIG (lwp->status_pending) == SIGTRAP)
	    {
	      thread_info *thread = get_lwp_thread (lwp);
	      if (thread->last_resume_kind != resume_step)
		{
		  threads_debug_printf ("detach: discard step-over SIGTRAP");

		  lwp->status_pending_p = 0;
		  lwp->status_pending = 0;
		  resume_one_lwp (lwp, lwp->stepping, 0, NULL);
		}
	      else
		threads_debug_printf
		  ("detach: resume_step, not discarding step-over SIGTRAP");
	    }
	}
      step_over_bkpt = null_ptid;
      unsuspend_all_lwps (lwp);
    }
}
4670
/* Act on THREAD's pending resume request (lwp->resume), if any.
   resume_stop requests queue a SIGSTOP as needed; other kinds enqueue
   the requested signal and either proceed the LWP or leave it stopped
   when LEAVE_ALL_STOPPED is set or it has a pending status or is
   suspended.  Clears lwp->resume and the thread's last_status.  */

void
linux_process_target::resume_one_thread (thread_info *thread,
					 bool leave_all_stopped)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int leave_pending;

  /* No resume request recorded for this thread.  */
  if (lwp->resume == NULL)
    return;

  if (lwp->resume->kind == resume_stop)
    {
      threads_debug_printf ("resume_stop request for LWP %ld",
			    lwpid_of (thread));

      if (!lwp->stopped)
	{
	  threads_debug_printf ("stopping LWP %ld", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  threads_debug_printf ("already stopped LWP %ld", lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report.empty ())
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.set_ignore ();
      return;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume it - we can just report the pending status.
     Likewise if it is suspended, because e.g., another thread is
     stepping past a breakpoint.  Make sure to queue any signals that
     would otherwise be sent.  In all-stop mode, we do this decision
     based on if *any* thread has a pending status.  If there's a
     thread that needs the step-over-breakpoint dance, then don't
     resume any other thread but that particular one.  */
  leave_pending = (lwp->suspended
		   || lwp->status_pending_p
		   || leave_all_stopped);

  /* If we have a new signal, enqueue the signal.  */
  if (lwp->resume->sig != 0)
    {
      siginfo_t info, *info_p;

      /* If this is the same signal we were previously stopped by,
	 make sure to queue its siginfo.  */
      if (WIFSTOPPED (lwp->last_status)
	  && WSTOPSIG (lwp->last_status) == lwp->resume->sig
	  && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
		     (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
    }

  if (!leave_pending)
    {
      threads_debug_printf ("resuming LWP %ld", lwpid_of (thread));

      proceed_one_lwp (thread, NULL);
    }
  else
    threads_debug_printf ("leaving LWP %ld stopped", lwpid_of (thread));

  thread->last_status.set_ignore ();
  lwp->resume = NULL;
}
4768
/* The target's resume method: record the N resume requests in
   RESUME_INFO against each thread, then either resume the threads or
   leave them all stopped if, in all-stop mode, some thread has a
   pending status to report or needs a step-over first.  */

void
linux_process_target::resume (thread_resume *resume_info, size_t n)
{
  struct thread_info *need_step_over = NULL;

  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  /* Attach each request in RESUME_INFO to the thread it targets.  */
  for_each_thread ([&] (thread_info *thread)
    {
      linux_set_resume_request (thread, resume_info, n);
    });

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  bool any_pending = false;
  if (!non_stop)
    any_pending = find_thread ([this] (thread_info *thread)
		    {
		      return resume_status_pending (thread);
		    }) != nullptr;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && low_supports_breakpoints ())
    need_step_over = find_thread ([this] (thread_info *thread)
		       {
			 return thread_needs_step_over (thread);
		       });

  bool leave_all_stopped = (need_step_over != NULL || any_pending);

  if (need_step_over != NULL)
    threads_debug_printf ("Not resuming all, need step over");
  else if (any_pending)
    threads_debug_printf ("Not resuming, all-stop and found "
			  "an LWP with pending status");
  else
    threads_debug_printf ("Resuming, no pending status or step over needed");

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  for_each_thread ([&] (thread_info *thread)
    {
      resume_one_thread (thread, leave_all_stopped);
    });

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  /* We may have events that were pending that can/should be sent to
     the client now.  Trigger a linux_wait call.  */
  if (target_is_async_p ())
    async_file_mark ();
}
4831
/* Resume THREAD's LWP unless it is EXCEPT, already running, holding a
   pending status, suspended, or meant to stay stopped.  Re-queues a
   SIGSTOP for resume_stop threads that were only paused internally,
   and decides whether to single-step (client step request, or an
   ongoing breakpoint reinsertion) before resuming.  */

void
linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  /* EXCEPT is the LWP the caller wants left alone.  */
  if (lwp == except)
    return;

  threads_debug_printf ("lwp %ld", lwpid_of (thread));

  if (!lwp->stopped)
    {
      threads_debug_printf ("   LWP %ld already running", lwpid_of (thread));
      return;
    }

  /* The client asked this thread to stop and we've already reported
     the stop; keep it stopped.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
    {
      threads_debug_printf ("   client wants LWP to remain %ld stopped",
			    lwpid_of (thread));
      return;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf ("   LWP %ld has pending status, leaving stopped",
			    lwpid_of (thread));
      return;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      threads_debug_printf ("   LWP %ld is suspended", lwpid_of (thread));
      return;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report.empty ()
      && (lwp->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting))
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      threads_debug_printf
	("Client wants LWP %ld to stop.  Making sure it has a SIGSTOP pending",
	 lwpid_of (thread));

      send_sigstop (lwp);
    }

  if (thread->last_resume_kind == resume_step)
    {
      threads_debug_printf ("   stepping LWP %ld, client wants it stepping",
			    lwpid_of (thread));

      /* If resume_step is requested by GDB, install single-step
	 breakpoints when the thread is about to be actually resumed if
	 the single-step breakpoints weren't removed.  */
      if (supports_software_single_step ()
	  && !has_single_step_breakpoints (thread))
	install_software_single_step_breakpoints (lwp);

      step = maybe_hw_step (thread);
    }
  else if (lwp->bp_reinsert != 0)
    {
      /* Still stepping over a breakpoint; keep stepping.  */
      threads_debug_printf ("   stepping LWP %ld, reinsert set",
			    lwpid_of (thread));

      step = maybe_hw_step (thread);
    }
  else
    step = 0;

  resume_one_lwp (lwp, step, 0, NULL);
}
4920
df95181f
TBA
4921void
4922linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4923 lwp_info *except)
7984d532 4924{
d86d4aaf 4925 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
4926
4927 if (lwp == except)
e2b44075 4928 return;
7984d532 4929
863d01bd 4930 lwp_suspended_decr (lwp);
7984d532 4931
e2b44075 4932 proceed_one_lwp (thread, except);
d50171e4
PA
4933}
4934
/* Resume all stopped LWPs, unless one of them needs a step-over, in
   which case start that step-over instead (all others stay stopped
   until it completes).  */

void
linux_process_target::proceed_all_lwps ()
{
  struct thread_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (low_supports_breakpoints ())
    {
      need_step_over = find_thread ([this] (thread_info *thread)
			 {
			   return thread_needs_step_over (thread);
			 });

      if (need_step_over != NULL)
	{
	  threads_debug_printf ("found thread %ld needing a step-over",
				lwpid_of (need_step_over));

	  start_step_over (get_thread_lwp (need_step_over));
	  return;
	}
    }

  threads_debug_printf ("Proceeding, no step-over needed");

  for_each_thread ([this] (thread_info *thread)
    {
      proceed_one_lwp (thread, NULL);
    });
}
4969
d16f3f6c
TBA
4970void
4971linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
d50171e4 4972{
c058728c
SM
4973 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4974
4975 if (except)
4976 threads_debug_printf ("except=(LWP %ld)",
4977 lwpid_of (get_lwp_thread (except)));
4978 else
4979 threads_debug_printf ("except=nullptr");
5544ad89 4980
7984d532 4981 if (unsuspend)
e2b44075
SM
4982 for_each_thread ([&] (thread_info *thread)
4983 {
4984 unsuspend_and_proceed_one_lwp (thread, except);
4985 });
7984d532 4986 else
e2b44075
SM
4987 for_each_thread ([&] (thread_info *thread)
4988 {
4989 proceed_one_lwp (thread, except);
4990 });
0d62e5e8
DJ
4991}
4992
58caa3dc
DJ
4993
4994#ifdef HAVE_LINUX_REGSETS
4995
1faeff08
MR
4996#define use_linux_regsets 1
4997
030031ee
PA
4998/* Returns true if REGSET has been disabled. */
4999
5000static int
5001regset_disabled (struct regsets_info *info, struct regset_info *regset)
5002{
5003 return (info->disabled_regsets != NULL
5004 && info->disabled_regsets[regset - info->regsets]);
5005}
5006
5007/* Disable REGSET. */
5008
5009static void
5010disable_regset (struct regsets_info *info, struct regset_info *regset)
5011{
5012 int dr_offset;
5013
5014 dr_offset = regset - info->regsets;
5015 if (info->disabled_regsets == NULL)
224c3ddb 5016 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
030031ee
PA
5017 info->disabled_regsets[dr_offset] = 1;
5018}
5019
/* Fetch all regsets in REGSETS_INFO from the current thread's LWP via
   ptrace and store them into REGCACHE.  Regsets that fail with EIO
   (or EINVAL when optional) are disabled for this process mode;
   ENODATA and ESRCH failures are silently ignored.  Returns 0 if a
   GENERAL_REGS regset was fetched, 1 otherwise (telling the caller it
   must also fetch the general registers another way).  */

static int
regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || regset_disabled (regsets_info, regset))
	continue;

      buf = xmalloc (regset->size);

      /* Regsets with an NT_* type go through PTRACE_GETREGSET, which
	 takes an iovec; the rest pass the buffer directly.  */
      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ENODATA)
	    {
	      /* ENODATA may be returned if the regset is currently
		 not "active".  This can happen in normal operation,
		 so suppress the warning in this case.  */
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to read its registers.  */
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else
	{
	  if (regset->type == GENERAL_REGS)
	    saw_general_regs = 1;
	  regset->store_function (regcache, buf);
	}
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5098
/* Write all writable regsets in REGSETS_INFO from REGCACHE to the
   current thread's LWP via ptrace.  Each regset is first read from
   the kernel, overlaid with the cached registers, then written back,
   so kernel-only fields are preserved.  Error handling mirrors the
   fetch path: EIO/optional-EINVAL disables the regset, ESRCH aborts
   quietly (process gone).  Returns 0 if a GENERAL_REGS regset was
   stored, 1 otherwise.  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip empty, disabled, or read-only (no fill_function) regsets.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5187
1faeff08 5188#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5189
1faeff08 5190#define use_linux_regsets 0
3aee8918
PA
5191#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5192#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5193
58caa3dc 5194#endif
1faeff08
MR
5195
5196/* Return 1 if register REGNO is supported by one of the regset ptrace
5197 calls or 0 if it has to be transferred individually. */
5198
5199static int
3aee8918 5200linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5201{
5202 unsigned char mask = 1 << (regno % 8);
5203 size_t index = regno / 8;
5204
5205 return (use_linux_regsets
3aee8918
PA
5206 && (regs_info->regset_bitmap == NULL
5207 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5208}
5209
58caa3dc 5210#ifdef HAVE_LINUX_USRREGS
1faeff08 5211
5b3da067 5212static int
3aee8918 5213register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5214{
5215 int addr;
5216
3aee8918 5217 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5218 error ("Invalid register number %d.", regnum);
5219
3aee8918 5220 addr = usrregs->regmap[regnum];
1faeff08
MR
5221
5222 return addr;
5223}
5224
daca57a7
TBA
5225
/* Fetch one register, REGNO, from the current thread's LWP with
   PTRACE_PEEKUSER, one word at a time, and supply it to REGCACHE.
   Silently skips registers beyond USRREGS, ones the low target cannot
   fetch, and ones with no usr offset; on a ptrace error the register
   is supplied as unavailable.  */

void
linux_process_target::fetch_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_fetch_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	{
	  /* Mark register REGNO unavailable.  */
	  supply_register (regcache, regno, NULL);
	  return;
	}
    }

  low_supply_ptrace_register (regcache, regno, buf);
}
5269
daca57a7
TBA
/* Write one register, REGNO, from REGCACHE to the current thread's
   LWP with PTRACE_POKEUSER, one word at a time.  Silently skips
   registers beyond USRREGS, ones the low target cannot store, and
   ones with no usr offset; ESRCH (process gone) is ignored, other
   ptrace errors raise an error unless the register is one the low
   target says cannot be stored anyway.  */

void
linux_process_target::store_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_store_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words and
     zero-fill so the trailing padding is well-defined.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);
  memset (buf, 0, size);

  low_collect_ptrace_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;


	  if (!low_cannot_store_register (regno))
	    error ("writing register %d: %s", regno, safe_strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
daca57a7 5321#endif /* HAVE_LINUX_USRREGS */
1faeff08 5322
b35db733
TBA
/* Default implementation of the low target hook: collect register
   REGNO from REGCACHE into BUF with no target-specific munging.
   Architectures that need to reorder or convert bytes override
   this.  */

void
linux_process_target::low_collect_ptrace_register (regcache *regcache,
						   int regno, char *buf)
{
  collect_register (regcache, regno, buf);
}
5329
/* Default implementation of the low target hook: supply the bytes in
   BUF to REGCACHE as register REGNO with no target-specific munging.
   Architectures that need to reorder or convert bytes override
   this.  */

void
linux_process_target::low_supply_ptrace_register (regcache *regcache,
						  int regno, const char *buf)
{
  supply_register (regcache, regno, buf);
}
5336
daca57a7
TBA
/* Fetch registers via PTRACE_PEEKUSER.  REGNO == -1 means all usr
   registers; otherwise just that one.  When ALL is zero, registers
   already covered by a regset are skipped.  A no-op on hosts without
   HAVE_LINUX_USRREGS.  */

void
linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
						    regcache *regcache,
						    int regno, int all)
{
#ifdef HAVE_LINUX_USRREGS
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
	if (all || !linux_register_in_regsets (regs_info, regno))
	  fetch_register (usr, regcache, regno);
    }
  else
    fetch_register (usr, regcache, regno);
#endif
}
5355
daca57a7
TBA
/* Store registers via PTRACE_POKEUSER.  REGNO == -1 means all usr
   registers; otherwise just that one.  When ALL is zero, registers
   already covered by a regset are skipped.  A no-op on hosts without
   HAVE_LINUX_USRREGS.  */

void
linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
						    regcache *regcache,
						    int regno, int all)
{
#ifdef HAVE_LINUX_USRREGS
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
	if (all || !linux_register_in_regsets (regs_info, regno))
	  store_register (usr, regcache, regno);
    }
  else
    store_register (usr, regcache, regno);
#endif
}
1faeff08 5374
a5a4d4cd
TBA
/* The target's fetch_registers method: fill REGCACHE for register
   REGNO (or all registers when REGNO == -1), trying the low target's
   per-register hook first, then regsets, then falling back to usr
   registers for whatever the regsets did not cover.  */

void
linux_process_target::fetch_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      /* Give the low target a chance to fetch each register its own
	 way first.  */
      if (regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  low_fetch_register (regcache, regno);

      /* regsets_fetch_inferior_registers returns nonzero when the usr
	 path must supply the general registers too.  */
      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      if (low_fetch_register (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5405
a5a4d4cd
TBA
/* The target's store_registers method: write register REGNO (or all
   registers when REGNO == -1) from REGCACHE to the inferior, trying
   regsets first and falling back to usr registers for whatever the
   regsets did not cover.  */

void
linux_process_target::store_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      /* regsets_store_inferior_registers returns nonzero when the usr
	 path must store the general registers too.  */
      all = regsets_store_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5430
bd70b1f2
TBA
/* Default implementation of the low target hook: no register is
   fetched in a target-specific way, so return false and let the
   caller fall back to the regset/usr paths.  */

bool
linux_process_target::low_fetch_register (regcache *regcache, int regno)
{
  return false;
}
da6d8c04 5436
/* A wrapper for the read_memory target op, usable as a plain C
   function pointer/callback.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  return the_target->read_memory (memaddr, myaddr, len);
}
5444
e2558df3 5445
421490af
PA
/* Helper for read_memory/write_memory using /proc/PID/mem.  Because
   we can use a single read/write call, this can be much more
   efficient than banging away at PTRACE_PEEKTEXT.  Also, unlike
   PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running threads.
   One and only one of READBUF and WRITEBUF is non-null.  If READBUF is
   not null, then we're reading, otherwise we're writing.  Returns 0
   on success, an errno value (or EIO) on failure.  */

static int
proc_xfer_memory (CORE_ADDR memaddr, unsigned char *readbuf,
		  const gdb_byte *writebuf, int len)
{
  gdb_assert ((readbuf == nullptr) != (writebuf == nullptr));

  process_info *proc = current_process ();

  /* The per-process /proc/PID/mem file descriptor, opened elsewhere;
     -1 means it isn't available.  */
  int fd = proc->priv->mem_fd;
  if (fd == -1)
    return EIO;

  /* Loop because read/write may transfer fewer bytes than requested.  */
  while (len > 0)
    {
      int bytes;

      /* Use pread64/pwrite64 if available, since they save a syscall
	 and can handle 64-bit offsets even on 32-bit platforms (for
	 instance, SPARC debugging a SPARC64 application).  But only
	 use them if the offset isn't so high that when cast to off_t
	 it'd be negative, as seen on SPARC64.  pread64/pwrite64
	 outright reject such offsets.  lseek does not.  */
#ifdef HAVE_PREAD64
      if ((off_t) memaddr >= 0)
	bytes = (readbuf != nullptr
		 ? pread64 (fd, readbuf, len, memaddr)
		 : pwrite64 (fd, writebuf, len, memaddr));
      else
#endif
	{
	  bytes = -1;
	  if (lseek (fd, memaddr, SEEK_SET) != -1)
	    bytes = (readbuf != nullptr
		     ? read (fd, readbuf, len)
		     : write (fd, writebuf, len));
	}

      if (bytes < 0)
	return errno;
      else if (bytes == 0)
	{
	  /* EOF means the address space is gone, the whole process
	     exited or execed.  */
	  return EIO;
	}

      /* Advance past the chunk just transferred.  */
      memaddr += bytes;
      if (readbuf != nullptr)
	readbuf += bytes;
      else
	writebuf += bytes;
      len -= bytes;
    }

  return 0;
}
c3e735a6 5509
/* Read LEN bytes at MEMADDR in the inferior into MYADDR, via
   /proc/PID/mem.  Returns 0 on success, an errno value on failure.  */

int
linux_process_target::read_memory (CORE_ADDR memaddr,
				   unsigned char *myaddr, int len)
{
  /* Null WRITEBUF selects the read direction in the helper.  */
  return proc_xfer_memory (memaddr, myaddr, nullptr, len);
}
5516
93ae6fdc
PA
5517/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5518 memory at MEMADDR. On failure (cannot write to the inferior)
f0ae6fc3 5519 returns the value of errno. Always succeeds if LEN is zero. */
da6d8c04 5520
e2558df3
TBA
5521int
5522linux_process_target::write_memory (CORE_ADDR memaddr,
5523 const unsigned char *myaddr, int len)
da6d8c04 5524{
0d62e5e8
DJ
5525 if (debug_threads)
5526 {
58d6951d 5527 /* Dump up to four bytes. */
bf47e248
PA
5528 char str[4 * 2 + 1];
5529 char *p = str;
5530 int dump = len < 4 ? len : 4;
5531
421490af 5532 for (int i = 0; i < dump; i++)
bf47e248
PA
5533 {
5534 sprintf (p, "%02x", myaddr[i]);
5535 p += 2;
5536 }
5537 *p = '\0';
5538
c058728c 5539 threads_debug_printf ("Writing %s to 0x%08lx in process %d",
421490af 5540 str, (long) memaddr, current_process ()->pid);
0d62e5e8
DJ
5541 }
5542
421490af 5543 return proc_xfer_memory (memaddr, nullptr, myaddr, len);
da6d8c04 5544}
2f2893d9 5545
/* Target op: initialize libthread_db-based symbol lookup for the
   current process, if built with thread_db support.  */

void
linux_process_target::look_up_symbols ()
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process; nothing to do.  */
  if (proc->priv->thread_db != NULL)
    return;

  thread_db_init ();
#endif
}
5558
eb497a2a
TBA
5559void
5560linux_process_target::request_interrupt ()
e5379b03 5561{
78708b7c
PA
5562 /* Send a SIGINT to the process group. This acts just like the user
5563 typed a ^C on the controlling terminal. */
4c35c4c6
TV
5564 int res = ::kill (-signal_pid, SIGINT);
5565 if (res == -1)
5566 warning (_("Sending SIGINT to process group of pid %ld failed: %s"),
5567 signal_pid, safe_strerror (errno));
e5379b03
DJ
5568}
5569
/* The auxiliary vector is available via /proc on GNU/Linux (see
   read_auxv below).  */

bool
linux_process_target::supports_read_auxv ()
{
  return true;
}
5575
aa691b87
RM
5576/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5577 to debugger memory starting at MYADDR. */
5578
eac215cc 5579int
43e5fbd8
TJB
5580linux_process_target::read_auxv (int pid, CORE_ADDR offset,
5581 unsigned char *myaddr, unsigned int len)
aa691b87
RM
5582{
5583 char filename[PATH_MAX];
5584 int fd, n;
5585
6cebaf6e 5586 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5587
5588 fd = open (filename, O_RDONLY);
5589 if (fd < 0)
5590 return -1;
5591
5592 if (offset != (CORE_ADDR) 0
5593 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5594 n = -1;
5595 else
5596 n = read (fd, myaddr, len);
5597
5598 close (fd);
5599
5600 return n;
5601}
5602
7e0bde70
TBA
5603int
5604linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5605 int size, raw_breakpoint *bp)
e013ee27 5606{
c8f4bfdd
YQ
5607 if (type == raw_bkpt_type_sw)
5608 return insert_memory_breakpoint (bp);
e013ee27 5609 else
9db9aa23
TBA
5610 return low_insert_point (type, addr, size, bp);
5611}
5612
/* Default low target implementation: hardware break-/watchpoints are
   not supported.  Returning 1 means "unsupported" (see target.h).  */

int
linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}
5620
7e0bde70
TBA
5621int
5622linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5623 int size, raw_breakpoint *bp)
e013ee27 5624{
c8f4bfdd
YQ
5625 if (type == raw_bkpt_type_sw)
5626 return remove_memory_breakpoint (bp);
e013ee27 5627 else
9db9aa23
TBA
5628 return low_remove_point (type, addr, size, bp);
5629}
5630
/* Default low target implementation: hardware break-/watchpoints are
   not supported.  Returning 1 means "unsupported" (see target.h).  */

int
linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}
5638
/* Implement the stopped_by_sw_breakpoint target_ops
   method.  */

bool
linux_process_target::stopped_by_sw_breakpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  /* STOP_REASON is recorded by the low-level wait machinery.  */
  return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
}

/* Implement the supports_stopped_by_sw_breakpoint target_ops
   method.  */

bool
linux_process_target::supports_stopped_by_sw_breakpoint ()
{
  /* Accurate stop-reason reporting requires SIGTRAP siginfo.  */
  return USE_SIGTRAP_SIGINFO;
}
5658
/* Implement the stopped_by_hw_breakpoint target_ops
   method.  */

bool
linux_process_target::stopped_by_hw_breakpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  /* STOP_REASON is recorded by the low-level wait machinery.  */
  return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
}

/* Implement the supports_stopped_by_hw_breakpoint target_ops
   method.  */

bool
linux_process_target::supports_stopped_by_hw_breakpoint ()
{
  /* Accurate stop-reason reporting requires SIGTRAP siginfo.  */
  return USE_SIGTRAP_SIGINFO;
}
5678
/* Implement the supports_hardware_single_step target_ops method.
   ptrace-based single-stepping is always available here.  */

bool
linux_process_target::supports_hardware_single_step ()
{
  return true;
}
5686
/* Report whether the current thread stopped because a watchpoint
   triggered.  */

bool
linux_process_target::stopped_by_watchpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}

/* Return the data address whose access triggered the watchpoint,
   as recorded when the stop was processed.  */

CORE_ADDR
linux_process_target::stopped_data_address ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stopped_data_address;
}
5702
/* This is only used for targets that define PT_TEXT_ADDR,
   PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
   the target has different ways of acquiring this information, like
   loadmaps.  */

bool
linux_process_target::supports_read_offsets ()
{
#ifdef SUPPORTS_READ_OFFSETS
  return true;
#else
  return false;
#endif
}
5717
/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Returns 1 on success (offsets stored through
   TEXT_P/DATA_P), 0 on failure.  */

int
linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#ifdef SUPPORTS_READ_OFFSETS
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  /* PTRACE_PEEKUSER reports errors only through errno, so clear it
     first and check it after all three peeks.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
#else
  gdb_assert_not_reached ("target op read_offsets not supported");
#endif
}
52fb6437 5758
/* TLS address lookup is available only when built with thread_db.  */

bool
linux_process_target::supports_get_tls_address ()
{
#ifdef USE_THREAD_DB
  return true;
#else
  return false;
#endif
}

/* Compute the thread-local storage address for THREAD given OFFSET
   and LOAD_MODULE, storing it in *ADDRESS.  Delegates to
   libthread_db; returns -1 when not built with thread_db.  */

int
linux_process_target::get_tls_address (thread_info *thread,
				       CORE_ADDR offset,
				       CORE_ADDR load_module,
				       CORE_ADDR *address)
{
#ifdef USE_THREAD_DB
  return thread_db_get_tls_address (thread, offset, load_module, address);
#else
  return -1;
#endif
}
5781
bool
linux_process_target::supports_qxfer_osdata ()
{
  return true;
}

/* Serve the qXfer:osdata request through the shared linux-osdata
   code.  Note WRITEBUF is not passed along: osdata is read-only.  */

int
linux_process_target::qxfer_osdata (const char *annex,
				    unsigned char *readbuf,
				    unsigned const char *writebuf,
				    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
5796
/* Convert a siginfo object between the native ptrace layout and the
   layout used for the inferior (INF_SIGINFO).  DIRECTION == 1 copies
   from INF_SIGINFO into SIGINFO; otherwise the reverse.  The low
   target gets first crack, for architectures needing a real layout
   conversion.  */

void
linux_process_target::siginfo_fixup (siginfo_t *siginfo,
				     gdb_byte *inf_siginfo, int direction)
{
  bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);

  /* If there was no callback, or the callback didn't do anything,
     then just do a straight memcpy.  */
  if (!done)
    {
      if (direction == 1)
	memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
      else
	memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
    }
}

/* Default implementation: no architecture-specific siginfo conversion
   is needed; report "not handled" so the caller memcpy's.  */

bool
linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
					 int direction)
{
  return false;
}
5820
bool
linux_process_target::supports_qxfer_siginfo ()
{
  return true;
}

/* Read or write the siginfo of the current LWP using
   PTRACE_GETSIGINFO/PTRACE_SETSIGINFO.  Exactly one of READBUF and
   WRITEBUF is non-null.  Returns the number of bytes transferred, or
   -1 on error.  */

int
linux_process_target::qxfer_siginfo (const char *annex,
				     unsigned char *readbuf,
				     unsigned const char *writebuf,
				     CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  threads_debug_printf ("%s siginfo for lwp %d.",
			readbuf != NULL ? "Reading" : "Writing",
			pid);

  if (offset >= sizeof (siginfo))
    return -1;

  /* Even for a write, fetch the current siginfo first so that a
     partial write modifies only the requested byte range.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
5876
bd99dc85
PA
5877/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5878 so we notice when children change state; as the handler for the
5879 sigsuspend in my_waitpid. */
5880
5881static void
5882sigchld_handler (int signo)
5883{
5884 int old_errno = errno;
5885
5886 if (debug_threads)
e581f2b4
PA
5887 {
5888 do
5889 {
a7e559cc
AH
5890 /* Use the async signal safe debug function. */
5891 if (debug_write ("sigchld_handler\n",
5892 sizeof ("sigchld_handler\n") - 1) < 0)
e581f2b4
PA
5893 break; /* just ignore */
5894 } while (0);
5895 }
bd99dc85
PA
5896
5897 if (target_is_async_p ())
5898 async_file_mark (); /* trigger a linux_wait */
5899
5900 errno = old_errno;
5901}
5902
/* Non-stop mode is supported by this target.  */

bool
linux_process_target::supports_non_stop ()
{
  return true;
}
5908
/* Enable or disable async (non-blocking) mode.  Returns the previous
   async state.  SIGCHLD is blocked around the transition so the
   handler never observes a half-initialized event pipe.  */

bool
linux_process_target::async (bool enable)
{
  bool previous = target_is_async_p ();

  threads_debug_printf ("async (%d), previous=%d",
			enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      gdb_sigmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (!linux_event_pipe.open_pipe ())
	    {
	      /* Undo the block before bailing out.  */
	      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe.event_fd (),
			    handle_target_event, NULL,
			    "linux-low");

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe.event_fd ());

	  linux_event_pipe.close_pipe ();
	}

      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
5955
/* Switch non-stop mode on or off.  Returns 0 on success, -1 if the
   async state could not be changed accordingly.  */

int
linux_process_target::start_non_stop (bool nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  target_async (nonstop);

  /* Verify the transition actually took effect.  */
  if (target_is_async_p () != (nonstop != false))
    return -1;

  return 0;
}
5967
/* Capability queries: this target supports debugging multiple
   processes and reporting fork/vfork/exec events.  */

bool
linux_process_target::supports_multi_process ()
{
  return true;
}

/* Check if fork events are supported.  */

bool
linux_process_target::supports_fork_events ()
{
  return true;
}

/* Check if vfork events are supported.  */

bool
linux_process_target::supports_vfork_events ()
{
  return true;
}

/* Return the set of supported thread options.  */

gdb_thread_options
linux_process_target::supported_thread_options ()
{
  return GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
}

/* Check if exec events are supported.  */

bool
linux_process_target::supports_exec_events ()
{
  return true;
}
6005
/* Target hook for 'handle_new_gdb_connection'.  Causes a reset of the
   ptrace flags for all inferiors.  This is in case the new GDB connection
   doesn't support the same set of events that the previous one did.  */

void
linux_process_target::handle_new_gdb_connection ()
{
  /* Request that all the lwps reset their ptrace options.  */
  for_each_thread ([] (thread_info *thread)
    {
      struct lwp_info *lwp = get_thread_lwp (thread);

      if (!lwp->stopped)
	{
	  /* Stop the lwp so we can modify its ptrace options.  The
	     flags are applied later, once the stop is processed.  */
	  lwp->must_set_ptrace_flags = 1;
	  linux_stop_lwp (lwp);
	}
      else
	{
	  /* Already stopped; go ahead and set the ptrace options.  */
	  struct process_info *proc = find_process_pid (pid_of (thread));
	  int options = linux_low_ptrace_options (proc->attached);

	  linux_enable_event_reporting (lwpid_of (thread), options);
	  lwp->must_set_ptrace_flags = 0;
	}
    });
}
6035
/* Handle a 'monitor' command from GDB.  Only thread_db-related
   commands are recognized here; without thread_db support nothing is
   handled.  */

int
linux_process_target::handle_monitor_command (char *mon)
{
#ifdef USE_THREAD_DB
  return thread_db_handle_monitor_command (mon);
#else
  return 0;
#endif
}

/* Return the CPU core that thread PTID last ran on.  */

int
linux_process_target::core_of_thread (ptid_t ptid)
{
  return linux_common_core_of_thread (ptid);
}
6051
/* Address-space randomization can be disabled for new inferiors.  */

bool
linux_process_target::supports_disable_randomization ()
{
  return true;
}

/* The in-process agent is supported.  */

bool
linux_process_target::supports_agent ()
{
  return true;
}
6063
/* Range stepping is available when software single-stepping is in
   use; otherwise defer to the low target.  */

bool
linux_process_target::supports_range_stepping ()
{
  if (supports_software_single_step ())
    return true;

  return low_supports_range_stepping ();
}

/* Default low target implementation: no range stepping support.  */

bool
linux_process_target::low_supports_range_stepping ()
{
  return false;
}
6078
bool
linux_process_target::supports_pid_to_exec_file ()
{
  return true;
}

/* Return the filename of the executable running in process PID, via
   the shared linux-procfs helper.  */

const char *
linux_process_target::pid_to_exec_file (int pid)
{
  return linux_proc_pid_to_exec_file (pid);
}

/* Multi-filesystem support: file operations below are performed via
   the linux_mntns_* helpers, relative to process PID.  */

bool
linux_process_target::supports_multifs ()
{
  return true;
}

int
linux_process_target::multifs_open (int pid, const char *filename,
				    int flags, mode_t mode)
{
  return linux_mntns_open_cloexec (pid, filename, flags, mode);
}

int
linux_process_target::multifs_unlink (int pid, const char *filename)
{
  return linux_mntns_unlink (pid, filename);
}

ssize_t
linux_process_target::multifs_readlink (int pid, const char *filename,
					char *buf, size_t bufsiz)
{
  return linux_mntns_readlink (pid, filename, buf, bufsiz);
}
6116
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
/* Loadmap support for FDPIC/DSBT targets.  These structs mirror the
   layout the kernel returns; do not change their fields.  */

struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP		PT_GETDSBT
#  define LINUX_LOADMAP_EXEC	PTRACE_GETDSBT_EXEC
#  define LINUX_LOADMAP_INTERP	PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP		PTRACE_GETFDPIC
#  define LINUX_LOADMAP_EXEC	PTRACE_GETFDPIC_EXEC
#  define LINUX_LOADMAP_INTERP	PTRACE_GETFDPIC_INTERP
# endif

bool
linux_process_target::supports_read_loadmap ()
{
  return true;
}
6164
/* Read up to LEN bytes of loadmap data for ANNEX ("exec" or
   "interp"), starting at OFFSET, into MYADDR.  Returns the number of
   bytes copied, or -1 on error.  */

int
linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
				    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  /* The kernel stores a pointer to its loadmap through DATA.  */
  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
723b724b 6197#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6198
/* Syscall catchpoints are available only when the low target reports
   support for them.  */

bool
linux_process_target::supports_catch_syscall ()
{
  return low_supports_catch_syscall ();
}

/* Default low target implementation: no syscall catchpoints.  */

bool
linux_process_target::low_supports_catch_syscall ()
{
  return false;
}
6210
/* Fetch the program counter from REGCACHE via the low target, or 0
   when the low target provides no breakpoint (PC) support.  */

CORE_ADDR
linux_process_target::read_pc (regcache *regcache)
{
  if (!low_supports_breakpoints ())
    return 0;

  return low_get_pc (regcache);
}

/* Store PC into REGCACHE via the low target; only valid when the low
   target supports breakpoints.  */

void
linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (low_supports_breakpoints ());

  low_set_pc (regcache, pc);
}
6227
bool
linux_process_target::supports_thread_stopped ()
{
  return true;
}

/* Report whether THREAD's LWP is currently stopped.  */

bool
linux_process_target::thread_stopped (thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}
6239
ef980d65
PA
6240bool
6241linux_process_target::any_resumed ()
6242{
6243 bool any_resumed;
6244
6245 auto status_pending_p_any = [&] (thread_info *thread)
6246 {
6247 return status_pending_p_callback (thread, minus_one_ptid);
6248 };
6249
6250 auto not_stopped = [&] (thread_info *thread)
6251 {
6252 return not_stopped_callback (thread, minus_one_ptid);
6253 };
6254
6255 /* Find a resumed LWP, if any. */
6256 if (find_thread (status_pending_p_any) != NULL)
6257 any_resumed = 1;
6258 else if (find_thread (not_stopped) != NULL)
6259 any_resumed = 1;
6260 else
6261 any_resumed = 0;
6262
6263 return any_resumed;
6264}
6265
/* This exposes stop-all-threads functionality to other modules.
   FREEZE is forwarded to stop_all_lwps.  */

void
linux_process_target::pause_all (bool freeze)
{
  stop_all_lwps (freeze, NULL);
}

/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  UNFREEZE is forwarded to unstop_all_lwps.  */

void
linux_process_target::unpause_all (bool unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}
6282
/* Extract &phdr and num_phdr in the inferior.  Return 0 on success,
   nonzero on failure (1: cannot open auxv; 2: entries missing).
   Scans /proc/PID/auxv for the AT_PHDR and AT_PHNUM entries, using
   the 64- or 32-bit auxv record layout per IS_ELF64.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  *phdr_memaddr = 0;
  *num_phdr = 0;
  /* Stop early once both entries have been found.  */
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
6348
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.
   Works for both regular and PIE executables: the load bias is
   recovered from the PT_PHDR entry before the PT_DYNAMIC address is
   relocated.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
6422
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  /* Walk the dynamic section entry by entry until DT_NULL, giving the
     MIPS RLD_MAP entries priority over DT_DEBUG.  */
  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      /* RLD_MAP_REL is relative to the dynamic section address.  */
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      /* RLD_MAP_REL is relative to the dynamic section address.  */
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
6524
/* Read one pointer from MEMADDR in the inferior.  PTR_SIZE is the
   inferior's pointer size.  Returns 0 on success (storing the value
   through PTR), nonzero on failure.  */

static int
read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
{
  int ret;

  /* Go through a union so this works on either big or little endian
     hosts, when the inferior's pointer size is smaller than the size
     of CORE_ADDR.  It is assumed the inferior's endianness is the
     same of the superior's.  */
  union
  {
    CORE_ADDR core_addr;
    unsigned int ui;
    unsigned char uc;
  } addr;

  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
  if (ret == 0)
    {
      if (ptr_size == sizeof (CORE_ADDR))
	*ptr = addr.core_addr;
      else if (ptr_size == sizeof (unsigned int))
	*ptr = addr.ui;
      else
	gdb_assert_not_reached ("unhandled pointer size");
    }
  return ret;
}
6555
/* The SVR4 (r_debug-based) shared library list is supported.  */

bool
linux_process_target::supports_qxfer_libraries_svr4 ()
{
  return true;
}
6561
/* Field offsets (in bytes) within the inferior's r_debug /
   r_debug_extended and link_map structures, for each word size.  The
   hard-coded values below must match the inferior's ABI layout.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset of r_debug_extended.r_next.  */
    int r_next_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };

static const link_map_offsets lmo_32bit_offsets =
  {
    0,     /* r_version offset.  */
    4,     /* r_debug.r_map offset.  */
    20,    /* r_debug_extended.r_next.  */
    0,     /* l_addr offset in link_map.  */
    4,     /* l_name offset in link_map.  */
    8,     /* l_ld offset in link_map.  */
    12,    /* l_next offset in link_map.  */
    16     /* l_prev offset in link_map.  */
  };

static const link_map_offsets lmo_64bit_offsets =
  {
    0,     /* r_version offset.  */
    8,     /* r_debug.r_map offset.  */
    40,    /* r_debug_extended.r_next.  */
    0,     /* l_addr offset in link_map.  */
    8,     /* l_name offset in link_map.  */
    16,    /* l_ld offset in link_map.  */
    24,    /* l_next offset in link_map.  */
    32     /* l_prev offset in link_map.  */
  };
6612
/* Get the loaded shared libraries from one namespace.

   Walks the link_map list starting at LM_ADDR, appending one
   <library .../> XML element per named entry to DOCUMENT.  LMID is
   the namespace identifier emitted in the "lmid" attribute.  LM_PREV
   is the expected l_prev of the first entry; a mismatch stops the
   walk with a warning (corrupted list).  PTR_SIZE and LMO describe
   the inferior's pointer size and structure layout.  */

static void
read_link_map (std::string &document, CORE_ADDR lmid, CORE_ADDR lm_addr,
	       CORE_ADDR lm_prev, int ptr_size, const link_map_offsets *lmo)
{
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;

  /* Stop at the end of the list, or as soon as any field read
     fails.  */
  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%s != 0x%s",
		   paddress (lm_prev), paddress (l_prev));
	  break;
	}

      /* Not checking for error because reading may stop before we've got
	 PATH_MAX worth of characters.  */
      libname[0] = '\0';
      linux_read_memory (l_name, libname, sizeof (libname) - 1);
      libname[sizeof (libname) - 1] = '\0';
      /* Entries with an empty name (e.g. the main executable) are
	 skipped.  */
      if (libname[0] != '\0')
	{
	  string_appendf (document, "<library name=\"");
	  xml_escape_text_append (document, (char *) libname);
	  string_appendf (document, "\" lm=\"0x%s\" l_addr=\"0x%s\" "
			  "l_ld=\"0x%s\" lmid=\"0x%s\"/>",
			  paddress (lm_addr), paddress (l_addr),
			  paddress (l_ld), paddress (lmid));
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }
}
6661
/* Construct qXfer:libraries-svr4:read reply.

   ANNEX may carry "lmid=", "start=" and "prev=" hex-address arguments
   (each terminated by ';'); unknown names are skipped.  Copies up to
   LEN bytes of the generated XML document, starting at OFFSET, into
   READBUF.  Returns the number of bytes copied, -1 on error or when
   READBUF is NULL, and -2 when WRITEBUF is non-NULL (writing is not
   supported).  */

int
linux_process_target::qxfer_libraries_svr4 (const char *annex,
					    unsigned char *readbuf,
					    unsigned const char *writebuf,
					    CORE_ADDR offset, int len)
{
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;
  unsigned int machine;
  CORE_ADDR lmid = 0, lm_addr = 0, lm_prev = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Pick the 32-bit or 64-bit structure layout based on the ELF
     class of the inferior's executable.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  const link_map_offsets *lmo;
  int ptr_size;
  if (is_elf64)
    {
      lmo = &lmo_64bit_offsets;
      ptr_size = 8;
    }
  else
    {
      lmo = &lmo_32bit_offsets;
      ptr_size = 4;
    }

  /* Parse "name=hexvalue;" pairs from the annex, recognizing "lmid",
     "start" and "prev"; anything else is skipped up to the next
     ';'.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int name_len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      name_len = sep - annex;
      if (name_len == 4 && startswith (annex, "lmid"))
	addrp = &lmid;
      else if (name_len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (name_len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  std::string document = "<library-list-svr4 version=\"1.0\"";

  /* When the starting LM_ADDR is passed in the annex, only traverse that
     namespace, which is assumed to be identified by LMID.

     Otherwise, start with R_DEBUG and traverse all namespaces we find.  */
  if (lm_addr != 0)
    {
      document += ">";
      read_link_map (document, lmid, lm_addr, lm_prev, ptr_size, lmo);
    }
  else
    {
      if (lm_prev != 0)
	warning ("ignoring prev=0x%s without start", paddress (lm_prev));

      /* We could interpret LMID as 'provide only the libraries for this
	 namespace' but GDB is currently only providing lmid, start, and
	 prev, or nothing.  */
      if (lmid != 0)
	warning ("ignoring lmid=0x%s without start", paddress (lmid));

      /* The r_debug address is cached in the per-process private
	 data after the first lookup.  */
      CORE_ADDR r_debug = priv->r_debug;
      if (r_debug == 0)
	r_debug = priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (r_debug == (CORE_ADDR) -1)
	return -1;

      /* Terminate the header if we end up with an empty list.  */
      if (r_debug == 0)
	document += ">";

      /* Walk the chain of namespaces via r_debug_extended.r_next
	 (r_version >= 2 only).  */
      while (r_debug != 0)
	{
	  int r_version = 0;
	  if (linux_read_memory (r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0)
	    {
	      warning ("unable to read r_version from 0x%s",
		       paddress (r_debug + lmo->r_version_offset));
	      break;
	    }

	  if (r_version < 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	      break;
	    }

	  if (read_one_ptr (r_debug + lmo->r_map_offset, &lm_addr,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%s",
		       paddress (r_debug + lmo->r_map_offset));
	      break;
	    }

	  /* We read the entire namespace.  */
	  lm_prev = 0;

	  /* The first entry corresponds to the main executable unless the
	     dynamic loader was loaded late by a static executable.  But
	     in such case the main executable does not have PT_DYNAMIC
	     present and we would not have gotten here.  */
	  if (r_debug == priv->r_debug)
	    {
	      if (lm_addr != 0)
		string_appendf (document, " main-lm=\"0x%s\">",
				paddress (lm_addr));
	      else
		document += ">";

	      lm_prev = lm_addr;
	      if (read_one_ptr (lm_addr + lmo->l_next_offset,
				&lm_addr, ptr_size) != 0)
		{
		  warning ("unable to read l_next from 0x%s",
			   paddress (lm_addr + lmo->l_next_offset));
		  break;
		}
	    }

	  read_link_map (document, r_debug, lm_addr, lm_prev, ptr_size, lmo);

	  if (r_version < 2)
	    break;

	  if (read_one_ptr (r_debug + lmo->r_next_offset, &r_debug,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_next from 0x%s",
		       paddress (r_debug + lmo->r_next_offset));
	      break;
	    }
	}
    }

  document += "</library-list-svr4>";

  /* Window the document by OFFSET/LEN as the qXfer protocol
     requires.  */
  int document_len = document.length ();
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document.data () + offset, len);

  return len;
}
6842
9accd112
MM
6843#ifdef HAVE_LINUX_BTRACE
6844
/* Return true: branch tracing is supported when gdbserver was built
   with HAVE_LINUX_BTRACE.  */

bool
linux_process_target::supports_btrace ()
{
  return true;
}
6850
/* Enable branch tracing for thread TP with configuration CONF;
   delegates to linux_enable_btrace.  Returns the new tracing state,
   owned by the btrace layer.  */

btrace_target_info *
linux_process_target::enable_btrace (thread_info *tp,
				     const btrace_config *conf)
{
  return linux_enable_btrace (tp->id, conf);
}
6857
/* See to_disable_btrace target method.

   Returns 0 on success, -1 on failure (mapping btrace_error to the
   target method's int convention).  */

int
linux_process_target::disable_btrace (btrace_target_info *tinfo)
{
  enum btrace_error err;

  err = linux_disable_btrace (tinfo);
  return (err == BTRACE_ERR_NONE ? 0 : -1);
}
6868
bc504a31 6869/* Encode an Intel Processor Trace configuration. */
b20a6524
MM
6870
6871static void
873a185b 6872linux_low_encode_pt_config (std::string *buffer,
b20a6524
MM
6873 const struct btrace_data_pt_config *config)
6874{
873a185b 6875 *buffer += "<pt-config>\n";
b20a6524
MM
6876
6877 switch (config->cpu.vendor)
6878 {
6879 case CV_INTEL:
873a185b
TT
6880 string_xml_appendf (*buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6881 "model=\"%u\" stepping=\"%u\"/>\n",
6882 config->cpu.family, config->cpu.model,
6883 config->cpu.stepping);
b20a6524
MM
6884 break;
6885
6886 default:
6887 break;
6888 }
6889
873a185b 6890 *buffer += "</pt-config>\n";
b20a6524
MM
6891}
6892
6893/* Encode a raw buffer. */
6894
6895static void
873a185b 6896linux_low_encode_raw (std::string *buffer, const gdb_byte *data,
b20a6524
MM
6897 unsigned int size)
6898{
6899 if (size == 0)
6900 return;
6901
268a13a5 6902 /* We use hex encoding - see gdbsupport/rsp-low.h. */
873a185b 6903 *buffer += "<raw>\n";
b20a6524
MM
6904
6905 while (size-- > 0)
6906 {
6907 char elem[2];
6908
6909 elem[0] = tohex ((*data >> 4) & 0xf);
6910 elem[1] = tohex (*data++ & 0xf);
6911
8b2d5ef8 6912 buffer->append (elem, 2);
b20a6524
MM
6913 }
6914
873a185b 6915 *buffer += "</raw>\n";
b20a6524
MM
6916}
6917
/* See to_read_btrace target method.

   Reads the branch trace of TINFO according to TYPE and appends it
   to BUFFER as XML.  Returns 0 on success; on failure appends an
   "E.<message>" error string to BUFFER and returns -1.  */

int
linux_process_target::read_btrace (btrace_target_info *tinfo,
				   std::string *buffer,
				   enum btrace_read_type type)
{
  struct btrace_data btrace;
  enum btrace_error err;

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
	*buffer += "E.Overflow.";
      else
	*buffer += "E.Generic Error.";

      return -1;
    }

  /* Encode the trace according to its format.  */
  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      *buffer += "E.No Trace.";
      return -1;

    case BTRACE_FORMAT_BTS:
      /* BTS: one <block> element per branch trace block.  */
      *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
      *buffer += "<btrace version=\"1.0\">\n";

      for (const btrace_block &block : *btrace.variant.bts.blocks)
	string_xml_appendf (*buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			    paddress (block.begin), paddress (block.end));

      *buffer += "</btrace>\n";
      break;

    case BTRACE_FORMAT_PT:
      /* Intel PT: CPU configuration followed by the raw trace
	 data.  */
      *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
      *buffer += "<btrace version=\"1.0\">\n";
      *buffer += "<pt>\n";

      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      *buffer += "</pt>\n";
      *buffer += "</btrace>\n";
      break;

    default:
      *buffer += "E.Unsupported Trace Format.";
      return -1;
    }

  return 0;
}
/* See to_btrace_conf target method.

   Appends the branch-trace configuration of TINFO to BUFFER as a
   <btrace-conf> XML document.  Always returns 0; an unavailable
   configuration simply yields an empty document body.  */

int
linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
					std::string *buffer)
{
  const struct btrace_config *conf;

  *buffer += "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n";
  *buffer += "<btrace-conf version=\"1.0\">\n";

  conf = linux_btrace_conf (tinfo);
  if (conf != NULL)
    {
      switch (conf->format)
	{
	case BTRACE_FORMAT_NONE:
	  break;

	case BTRACE_FORMAT_BTS:
	  string_xml_appendf (*buffer, "<bts");
	  string_xml_appendf (*buffer, " size=\"0x%x\"", conf->bts.size);
	  string_xml_appendf (*buffer, " />\n");
	  break;

	case BTRACE_FORMAT_PT:
	  string_xml_appendf (*buffer, "<pt");
	  string_xml_appendf (*buffer, " size=\"0x%x\"", conf->pt.size);
	  string_xml_appendf (*buffer, "/>\n");
	  break;
	}
    }

  *buffer += "</btrace-conf>\n";
  return 0;
}
9accd112
MM
7014#endif /* HAVE_LINUX_BTRACE */
7015
/* See nat/linux-nat.h.

   Return the ptid of the current thread (the LWP gdbserver is
   currently operating on).  */

ptid_t
current_lwp_ptid (void)
{
  return ptid_of (current_thread);
}
7023
07b3255c 7024/* A helper function that copies NAME to DEST, replacing non-printable
862180a2
TT
7025 characters with '?'. Returns the original DEST as a
7026 convenience. */
07b3255c
TT
7027
7028static const char *
7029replace_non_ascii (char *dest, const char *name)
7030{
862180a2 7031 const char *result = dest;
07b3255c
TT
7032 while (*name != '\0')
7033 {
7034 if (!ISPRINT (*name))
7035 *dest++ = '?';
7036 else
7037 *dest++ = *name;
7038 ++name;
7039 }
862180a2
TT
7040 *dest = '\0';
7041 return result;
07b3255c
TT
7042}
7043
/* Return the name of thread THREAD, converted to UTF-8 for the XML
   <thread> response, or nullptr if the thread has no name.  The
   result points at a static buffer, overwritten on each call.  */

const char *
linux_process_target::thread_name (ptid_t thread)
{
  static char dest[100];

  const char *name = linux_proc_tid_get_name (thread);
  if (name == nullptr)
    return nullptr;

  /* Linux limits the comm file to 16 bytes (including the trailing
     \0.  If the program or thread name is set when using a multi-byte
     encoding, this might cause it to be truncated mid-character.  In
     this situation, sending the truncated form in an XML <thread>
     response will cause a parse error in gdb.  So, instead convert
     from the locale's encoding (we can't be sure this is the correct
     encoding, but it's as good a guess as we have) to UTF-8, but in a
     way that ignores any encoding errors.  See PR remote/30618.  */
  const char *cset = nl_langinfo (CODESET);
  iconv_t handle = iconv_open ("UTF-8//IGNORE", cset);
  if (handle == (iconv_t) -1)
    /* No converter available - fall back to replacing non-printable
       characters with '?'.  */
    return replace_non_ascii (dest, name);

  size_t inbytes = strlen (name);
  char *inbuf = const_cast<char *> (name);
  size_t outbytes = sizeof (dest);
  char *outbuf = dest;
  size_t result = iconv (handle, &inbuf, &inbytes, &outbuf, &outbytes);

  if (result == (size_t) -1)
    {
      /* E2BIG: output full - truncate, leaving room for the NUL.
	 EILSEQ/EINVAL: invalid or incomplete sequence - substitute a
	 single '?' if there is room for it plus the NUL.  */
      if (errno == E2BIG)
	outbuf = &dest[sizeof (dest) - 1];
      else if ((errno == EILSEQ || errno == EINVAL)
	       && outbuf < &dest[sizeof (dest) - 2])
	*outbuf++ = '?';
    }
  *outbuf = '\0';

  iconv_close (handle);
  return *dest == '\0' ? nullptr : dest;
}
7085
#if USE_THREAD_DB
/* Store the thread-db handle of PTID in *HANDLE and its length in
   *HANDLE_LEN; return true on success.  Delegates to
   thread_db_thread_handle.  */

bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  return thread_db_thread_handle (ptid, handle, handle_len);
}
#endif
7094
7b961964
SM
7095thread_info *
7096linux_process_target::thread_pending_parent (thread_info *thread)
7097{
7098 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
7099
7100 if (parent == nullptr)
7101 return nullptr;
7102
7103 return get_lwp_thread (parent);
7104}
7105
df5ad102 7106thread_info *
faf44a31
PA
7107linux_process_target::thread_pending_child (thread_info *thread,
7108 target_waitkind *kind)
df5ad102 7109{
faf44a31 7110 lwp_info *child = get_thread_lwp (thread)->pending_child (kind);
df5ad102
SM
7111
7112 if (child == nullptr)
7113 return nullptr;
7114
7115 return get_lwp_thread (child);
7116}
7117
/* Default implementation of linux_target_ops method "set_pc" for
   32-bit pc register which is literally named "pc".  PC is narrowed
   to 32 bits before being written to the register cache.  */

void
linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
{
  uint32_t newpc = pc;

  supply_register_by_name (regcache, "pc", &newpc);
}
7128
/* Default implementation of linux_target_ops method "get_pc" for
   32-bit pc register which is literally named "pc".  Returns the
   value zero-extended to CORE_ADDR.  */

CORE_ADDR
linux_get_pc_32bit (struct regcache *regcache)
{
  uint32_t pc;

  collect_register_by_name (regcache, "pc", &pc);
  threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
  return pc;
}
7141
/* Default implementation of linux_target_ops method "set_pc" for
   64-bit pc register which is literally named "pc".  */

void
linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
{
  uint64_t newpc = pc;

  supply_register_by_name (regcache, "pc", &newpc);
}
7152
/* Default implementation of linux_target_ops method "get_pc" for
   64-bit pc register which is literally named "pc".  */

CORE_ADDR
linux_get_pc_64bit (struct regcache *regcache)
{
  uint64_t pc;

  collect_register_by_name (regcache, "pc", &pc);
  threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
  return pc;
}
7165
0570503d 7166/* See linux-low.h. */
974c89e0 7167
0570503d 7168int
43e5fbd8 7169linux_get_auxv (int pid, int wordsize, CORE_ADDR match, CORE_ADDR *valp)
974c89e0
AH
7170{
7171 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7172 int offset = 0;
7173
7174 gdb_assert (wordsize == 4 || wordsize == 8);
7175
43e5fbd8
TJB
7176 while (the_target->read_auxv (pid, offset, data, 2 * wordsize)
7177 == 2 * wordsize)
974c89e0
AH
7178 {
7179 if (wordsize == 4)
7180 {
0570503d 7181 uint32_t *data_p = (uint32_t *) data;
974c89e0 7182 if (data_p[0] == match)
0570503d
PFC
7183 {
7184 *valp = data_p[1];
7185 return 1;
7186 }
974c89e0
AH
7187 }
7188 else
7189 {
0570503d 7190 uint64_t *data_p = (uint64_t *) data;
974c89e0 7191 if (data_p[0] == match)
0570503d
PFC
7192 {
7193 *valp = data_p[1];
7194 return 1;
7195 }
974c89e0
AH
7196 }
7197
7198 offset += 2 * wordsize;
7199 }
7200
7201 return 0;
7202}
7203
/* See linux-low.h.

   Return the AT_HWCAP auxv value for PID, or 0 if the entry is not
   found (linux_get_auxv leaves HWCAP untouched on failure).  */

CORE_ADDR
linux_get_hwcap (int pid, int wordsize)
{
  CORE_ADDR hwcap = 0;
  linux_get_auxv (pid, wordsize, AT_HWCAP, &hwcap);
  return hwcap;
}
7213
/* See linux-low.h.

   Return the AT_HWCAP2 auxv value for PID, or 0 if the entry is not
   found.  */

CORE_ADDR
linux_get_hwcap2 (int pid, int wordsize)
{
  CORE_ADDR hwcap2 = 0;
  linux_get_auxv (pid, wordsize, AT_HWCAP2, &hwcap2);
  return hwcap2;
}
6f69e520 7223
#ifdef HAVE_LINUX_REGSETS
/* Count the entries of INFO's regsets array - which is terminated by
   an entry with a negative size - and record the count in
   INFO->num_regsets.  */

void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  while (info->regsets[count].size >= 0)
    ++count;

  info->num_regsets = count;
}
#endif
7234
/* One-time initialization of the Linux low-level target: register
   the target vector, emit ptrace/proc capability warnings, install
   the SIGCHLD handler, and initialize architecture-specific and
   ptrace-feature state.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (the_linux_target);

  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();

  /* SA_RESTART so that syscalls interrupted by SIGCHLD are
     restarted.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  linux_check_ptrace_features ();
}