]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdbserver/linux-low.cc
x86: Also handle stores for -muse-unaligned-vector-move
[thirdparty/binutils-gdb.git] / gdbserver / linux-low.cc
CommitLineData
/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2021 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
268a13a5 22#include "gdbsupport/agent.h"
de0d863e 23#include "tdesc.h"
268a13a5
TT
24#include "gdbsupport/rsp-low.h"
25#include "gdbsupport/signals-state-save-restore.h"
96d7229d
LM
26#include "nat/linux-nat.h"
27#include "nat/linux-waitpid.h"
268a13a5 28#include "gdbsupport/gdb_wait.h"
5826e159 29#include "nat/gdb_ptrace.h"
125f8a3d
GB
30#include "nat/linux-ptrace.h"
31#include "nat/linux-procfs.h"
8cc73a39 32#include "nat/linux-personality.h"
da6d8c04
DJ
33#include <signal.h>
34#include <sys/ioctl.h>
35#include <fcntl.h>
0a30fbc4 36#include <unistd.h>
fd500816 37#include <sys/syscall.h>
f9387fc3 38#include <sched.h>
07e059b5
VP
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
53ce3c39 43#include <sys/stat.h>
efcbbd14 44#include <sys/vfs.h>
1570b33e 45#include <sys/uio.h>
268a13a5 46#include "gdbsupport/filestuff.h"
c144c7a0 47#include "tracepoint.h"
276d4552 48#include <inttypes.h>
268a13a5 49#include "gdbsupport/common-inferior.h"
2090129c 50#include "nat/fork-inferior.h"
268a13a5 51#include "gdbsupport/environ.h"
21987b9c 52#include "gdbsupport/gdb-sigmask.h"
268a13a5 53#include "gdbsupport/scoped_restore.h"
957f3f49
DE
54#ifndef ELFMAG0
55/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
59#include <elf.h>
60#endif
14d2069a 61#include "nat/linux-namespaces.h"
efcbbd14 62
fd462a61
DJ
63#ifndef O_LARGEFILE
64#define O_LARGEFILE 0
65#endif
1a981360 66
69f4c9cc
AH
67#ifndef AT_HWCAP2
68#define AT_HWCAP2 26
69#endif
70
db0dfaa0
LM
71/* Some targets did not define these ptrace constants from the start,
72 so gdbserver defines them locally here. In the future, these may
73 be removed after they are added to asm/ptrace.h. */
74#if !(defined(PT_TEXT_ADDR) \
75 || defined(PT_DATA_ADDR) \
76 || defined(PT_TEXT_END_ADDR))
77#if defined(__mcoldfire__)
78/* These are still undefined in 3.10 kernels. */
79#define PT_TEXT_ADDR 49*4
80#define PT_DATA_ADDR 50*4
81#define PT_TEXT_END_ADDR 51*4
db0dfaa0
LM
82/* These are still undefined in 3.10 kernels. */
83#elif defined(__TMS320C6X__)
84#define PT_TEXT_ADDR (0x10000*4)
85#define PT_DATA_ADDR (0x10004*4)
86#define PT_TEXT_END_ADDR (0x10008*4)
87#endif
88#endif
89
5203ae1e
TBA
90#if (defined(__UCLIBC__) \
91 && defined(HAS_NOMMU) \
92 && defined(PT_TEXT_ADDR) \
93 && defined(PT_DATA_ADDR) \
94 && defined(PT_TEXT_END_ADDR))
95#define SUPPORTS_READ_OFFSETS
96#endif
97
9accd112 98#ifdef HAVE_LINUX_BTRACE
125f8a3d 99# include "nat/linux-btrace.h"
268a13a5 100# include "gdbsupport/btrace-common.h"
9accd112
MM
101#endif
102
8365dcf5
TJB
#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  Fallback definition for hosts whose
   <elf.h> does not provide Elf32_auxv_t.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We use to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif
117
#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  Fallback definition for hosts whose
   <elf.h> does not provide Elf64_auxv_t.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We use to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
132
ded48a5e
YQ
133/* Does the current host support PTRACE_GETREGSET? */
134int have_ptrace_getregset = -1;
135
cff068da
GB
136/* LWP accessors. */
137
138/* See nat/linux-nat.h. */
139
ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  /* The ptid is stored on the LWP's thread, not on the LWP itself.  */
  return ptid_of (get_lwp_thread (lwp));
}
145
146/* See nat/linux-nat.h. */
147
4b134ca1
GB
void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  /* Attach the architecture-specific data INFO to LWP.  */
  lwp->arch_private = info;
}
154
155/* See nat/linux-nat.h. */
156
struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  /* Return the architecture-specific data attached to LWP, if any
     (may be NULL when no backend data was set).  */
  return lwp->arch_private;
}
162
163/* See nat/linux-nat.h. */
164
cff068da
GB
int
lwp_is_stopped (struct lwp_info *lwp)
{
  /* Expose the LWP's stopped flag to the shared nat/ layer.  */
  return lwp->stopped;
}
170
171/* See nat/linux-nat.h. */
172
enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  /* Reason the LWP last stopped (breakpoint, watchpoint, step, ...).  */
  return lwp->stop_reason;
}
178
0e00e962
AA
179/* See nat/linux-nat.h. */
180
int
lwp_is_stepping (struct lwp_info *lwp)
{
  /* Non-zero while the LWP is being single-stepped.  */
  return lwp->stepping;
}
186
05044653
PA
187/* A list of all unknown processes which receive stop signals. Some
188 other process will presumably claim each of these as forked
189 children momentarily. */
24a09b5f 190
05044653
PA
struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the list; entries are pushed by add_to_pid_list and removed
   by pull_pid_from_list.  */
static struct simple_pid_list *stopped_pids;
05044653
PA
203
204/* Trivial list manipulation functions to keep track of a list of new
205 stopped processes. */
206
207static void
208add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
209{
8d749320 210 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
211
212 new_pid->pid = pid;
213 new_pid->status = status;
214 new_pid->next = *listp;
215 *listp = new_pid;
216}
217
218static int
219pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
220{
221 struct simple_pid_list **p;
222
223 for (p = listp; *p != NULL; p = &(*p)->next)
224 if ((*p)->pid == pid)
225 {
226 struct simple_pid_list *next = (*p)->next;
227
228 *statusp = (*p)->status;
229 xfree (*p);
230 *p = next;
231 return 1;
232 }
233 return 0;
234}
24a09b5f 235
bde24c0a
PA
236enum stopping_threads_kind
237 {
238 /* Not stopping threads presently. */
239 NOT_STOPPING_THREADS,
240
241 /* Stopping threads. */
242 STOPPING_THREADS,
243
244 /* Stopping and suspending threads. */
245 STOPPING_AND_SUSPENDING_THREADS
246 };
247
248/* This is set while stop_all_lwps is in effect. */
6bd434d6 249static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
250
251/* FIXME make into a target method? */
24a09b5f 252int using_threads = 1;
24a09b5f 253
fa593d66
PA
254/* True if we're presently stabilizing threads (moving them out of
255 jump pads). */
256static int stabilizing_threads;
257
f50bf8e5 258static void unsuspend_all_lwps (struct lwp_info *except);
95954743 259static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
00db26fa 260static int lwp_is_marked_dead (struct lwp_info *lwp);
d50171e4 261static int kill_lwp (unsigned long lwpid, int signo);
863d01bd 262static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
ece66d65 263static int linux_low_ptrace_options (int attached);
ced2dffb 264static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
d50171e4 265
582511be
PA
266/* When the event-loop is doing a step-over, this points at the thread
267 being stepped. */
6bd434d6 268static ptid_t step_over_bkpt;
582511be 269
bf9ae9d8
TBA
bool
linux_process_target::low_supports_breakpoints ()
{
  /* Base implementation: no breakpoint support.  Callers such as
     get_pc and save_stop_reason bail out early when this is false.  */
  return false;
}
d50171e4 275
bf9ae9d8
TBA
CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  /* Default: no PC available.  Callers guard this with
     low_supports_breakpoints, which is false in the base class.  */
  return 0;
}
281
void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  /* Must be overridden by any backend whose low_supports_breakpoints
     returns true; the base implementation is never legitimately
     reached.  */
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}
0d62e5e8 287
7582c77c
TBA
std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  /* The base implementation must never be called; architecture
     backends that need it provide their own.  */
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
			  "implemented");
}
294
d4807ea2
TBA
int
linux_process_target::low_decr_pc_after_break ()
{
  /* Number of bytes the reported PC sits past the breakpoint address
     after a trap; 0 by default (no adjustment needed).  Used by
     save_stop_reason to compute the breakpoint address.  */
  return 0;
}
300
c2d6af84
PA
301/* True if LWP is stopped in its stepping range. */
302
303static int
304lwp_in_step_range (struct lwp_info *lwp)
305{
306 CORE_ADDR pc = lwp->stop_pc;
307
308 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
309}
310
bd99dc85
PA
311/* The read/write ends of the pipe registered as waitable file in the
312 event loop. */
313static int linux_event_pipe[2] = { -1, -1 };
314
315/* True if we're currently in async mode. */
316#define target_is_async_p() (linux_event_pipe[0] != -1)
317
02fc4de7 318static void send_sigstop (struct lwp_info *lwp);
bd99dc85 319
d0722149
DE
320/* Return non-zero if HEADER is a 64-bit ELF file. */
321
/* Classify HEADER.  Returns 1 for a 64-bit ELF header, 0 for a 32-bit
   one, and -1 when the magic bytes do not match ELF at all.  On
   success *MACHINE receives e_machine; otherwise EM_NONE.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  const unsigned char *ident = header->e_ident;

  if (ident[EI_MAG0] != ELFMAG0
      || ident[EI_MAG1] != ELFMAG1
      || ident[EI_MAG2] != ELFMAG2
      || ident[EI_MAG3] != ELFMAG3)
    {
      /* Not an ELF image.  */
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return ident[EI_CLASS] == ELFCLASS64;
}
337
338/* Return non-zero if FILE is a 64-bit ELF file,
339 zero if the file is not a 64-bit ELF file,
340 and -1 if the file is not accessible or doesn't exist. */
341
be07f1a2 342static int
214d508e 343elf_64_file_p (const char *file, unsigned int *machine)
d0722149 344{
957f3f49 345 Elf64_Ehdr header;
d0722149
DE
346 int fd;
347
348 fd = open (file, O_RDONLY);
349 if (fd < 0)
350 return -1;
351
352 if (read (fd, &header, sizeof (header)) != sizeof (header))
353 {
354 close (fd);
355 return 0;
356 }
357 close (fd);
358
214d508e 359 return elf_64_header_p (&header, machine);
d0722149
DE
360}
361
be07f1a2
PA
362/* Accepts an integer PID; Returns true if the executable PID is
363 running is a 64-bit ELF file.. */
364
365int
214d508e 366linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 367{
d8d2a3ee 368 char file[PATH_MAX];
be07f1a2
PA
369
370 sprintf (file, "/proc/%d/exe", pid);
214d508e 371 return elf_64_file_p (file, machine);
be07f1a2
PA
372}
373
fd000fb3
TBA
void
linux_process_target::delete_lwp (lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  /* Remove the thread from the global thread list first, then release
     any architecture-private data attached to the LWP.  */
  remove_thread (thr);

  low_delete_thread (lwp->arch_private);

  delete lwp;
}
388
fd000fb3
TBA
void
linux_process_target::low_delete_thread (arch_lwp_info *info)
{
  /* Default implementation should be overridden if architecture-specific
     info is being used.  Called from delete_lwp with the LWP's
     arch_private pointer.  */
  gdb_assert (info == nullptr);
}
95954743 396
fd000fb3
TBA
process_info *
linux_process_target::add_linux_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  /* XCNEW zero-initializes the private data.  */
  proc->priv = XCNEW (struct process_info_private);

  /* Give the architecture backend a chance to allocate per-process
     data; the base low_new_process returns nullptr.  */
  proc->priv->arch_private = low_new_process ();

  return proc;
}
409
fd000fb3
TBA
arch_process_info *
linux_process_target::low_new_process ()
{
  /* Default: no architecture-specific per-process data.  */
  return nullptr;
}
415
void
linux_process_target::low_delete_process (arch_process_info *info)
{
  /* Default implementation must be overridden if architecture-specific
     info exists.  Counterpart of low_new_process.  */
  gdb_assert (info == nullptr);
}
423
void
linux_process_target::low_new_fork (process_info *parent, process_info *child)
{
  /* Nop by default; backends that keep per-process architecture state
     override this to copy it from PARENT to CHILD on fork.  */
}
429
797bcff5
TBA
void
linux_process_target::arch_setup_thread (thread_info *thread)
{
  struct thread_info *saved_thread;

  /* low_arch_setup operates on current_thread, so switch to THREAD for
     the duration of the call and restore afterwards.  */
  saved_thread = current_thread;
  current_thread = thread;

  low_arch_setup ();

  current_thread = saved_thread;
}
442
d16f3f6c
TBA
/* Handle a ptrace extended event (fork/vfork/clone/vfork-done/exec)
   encoded in WSTAT for *ORIG_EVENT_LWP.  Returns 0 if the event should
   be reported to the client, 1 if it was consumed internally and
   should be suppressed.  On exec, *ORIG_EVENT_LWP is updated to point
   at the LWP re-created for the post-exec image.  */

int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
					    int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;

	  /* A fork/vfork child is a new process: its LWP id equals its
	     process id.  */
	  ptid = ptid_t (new_pid, new_pid);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_of (event_thr).lwp (),
			    ptid.pid ());
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = add_linux_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.set_stopped (GDB_SIGNAL_0);

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      if (debug_threads)
		debug_printf ("HEW: leaving child suspended\n");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, child will
		 hit it, so uninsert single-step breakpoints from parent
		 (and child).  Once vfork child is done, reinsert
		 them back to parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  /* The child gets its own copy of the parent's target
	     description.  */
	  target_desc_up tdesc = allocate_target_description ();
	  copy_target_description (tdesc.get (), parent_proc->tdesc);
	  child_proc->tdesc = tdesc.release ();

	  /* Clone arch-specific process data.  */
	  low_new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.set_forked (ptid);
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.set_vforked (ptid);

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent event is passed on to
	     higher layers.  */
	  event_lwp->fork_relative = child_lwp;
	  child_lwp->fork_relative = event_lwp;

	  /* If the parent thread is doing step-over with single-step
	     breakpoints, the list of single-step breakpoints are cloned
	     from the parent's.  Remove them from the child process.
	     In case of vfork, we'll reinsert them back once vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      /* PTRACE_EVENT_CLONE: a new thread in the same process.  */
      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (cs.report_thread_events)
	{
	  new_lwp->waitstatus.set_thread_created ();
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.set_vfork_done ();

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
	{
	  /* Restore the single-step breakpoints that were removed from
	     the parent when the vfork child was spawned.  */
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
	{
	  debug_printf ("HEW: Got exec event from LWP %ld\n",
			lwpid_of (event_thr));
	}

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.set_execd
	(make_unique_xstrdup
	 (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.set_ignore ();

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
711
df95181f
TBA
/* Return LWP's current PC, or 0 when the target has no breakpoint
   (and thus PC-access) support.  */

CORE_ADDR
linux_process_target::get_pc (lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (!low_supports_breakpoints ())
    return 0;

  /* The regcache is fetched through current_thread, so temporarily
     switch to LWP's thread and restore afterwards.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = low_get_pc (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}
734
9eedd27d
TBA
/* Store in *SYSNO the number of the syscall LWP is stopped at.  */

void
linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  /* The syscall number is read out of LWP's registers, so switch
     current_thread to LWP's thread for the regcache access.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  low_get_syscall_trapinfo (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}
752
9eedd27d
TBA
/* Base implementation of the low_get_syscall_trapinfo hook.  */

void
linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* By default, report an unknown system call number.  */
  *sysno = UNKNOWN_SYSCALL;
}
759
df95181f
TBA
/* Record in LWP's stop_reason and stop_pc why it stopped (software or
   hardware breakpoint, watchpoint, or single-step), rewinding the PC
   to the breakpoint address when the architecture reports the PC past
   the breakpoint.  Returns false when the target has no breakpoint
   support, true otherwise.  */

bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  pc = get_pc (lwp);
  /* Address the breakpoint instruction would be at, accounting for
     architectures whose PC ends up past the breakpoint.  */
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by trace\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return true;
}
ce3a066d 894
fd000fb3
TBA
lwp_info *
linux_process_target::add_lwp (ptid_t ptid)
{
  lwp_info *lwp = new lwp_info;

  /* Register a thread for this LWP; the back-pointer is kept in the
     lwp_info itself.  */
  lwp->thread = add_thread (ptid, lwp);

  /* Let the architecture backend attach its private data.  */
  low_new_thread (lwp);

  return lwp;
}
611cb4a5 906
fd000fb3
TBA
void
linux_process_target::low_new_thread (lwp_info *info)
{
  /* Nop by default; backends override this to set up per-thread
     architecture data for a newly added LWP.  */
}
912
2090129c
SDJ
/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  Runs in the
   inferior (fork-child) context before exec.  */

static void
linux_ptrace_fun ()
{
  /* Ask to be traced by our parent; any failure here aborts the
     inferior with a descriptive error.  */
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  /* Put the inferior in its own process group.  */
  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
944
/* Start an inferior process and returns its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    /* Scope so that address-space randomization is disabled (when
       requested by the client) only around the fork/exec.  */
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  add_linux_process (pid, 0);

  /* The initial thread's LWP id equals the process id.  */
  ptid = ptid_t (pid, pid);
  new_lwp = add_lwp (ptid);
  /* Ptrace options are applied later, once the LWP has stopped (see
     post_create_inferior).  */
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}
979
ece66d65
JS
980/* Implement the post_create_inferior target_ops method. */
981
6dee9afb
TBA
982void
983linux_process_target::post_create_inferior ()
ece66d65
JS
984{
985 struct lwp_info *lwp = get_thread_lwp (current_thread);
986
797bcff5 987 low_arch_setup ();
ece66d65
JS
988
989 if (lwp->must_set_ptrace_flags)
990 {
991 struct process_info *proc = current_process ();
992 int options = linux_low_ptrace_options (proc->attached);
993
994 linux_enable_event_reporting (lwpid_of (current_thread), options);
995 lwp->must_set_ptrace_flags = 0;
996 }
997}
998
7ae1a6a6 999int
fd000fb3 1000linux_process_target::attach_lwp (ptid_t ptid)
da6d8c04 1001{
54a0b537 1002 struct lwp_info *new_lwp;
e38504b3 1003 int lwpid = ptid.lwp ();
611cb4a5 1004
b8e1b30e 1005 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
56f7af9c 1006 != 0)
7ae1a6a6 1007 return errno;
24a09b5f 1008
b3312d80 1009 new_lwp = add_lwp (ptid);
0d62e5e8 1010
a6dbe5df
PA
1011 /* We need to wait for SIGSTOP before being able to make the next
1012 ptrace call on this LWP. */
1013 new_lwp->must_set_ptrace_flags = 1;
1014
644cebc9 1015 if (linux_proc_pid_is_stopped (lwpid))
c14d7ab2
PA
1016 {
1017 if (debug_threads)
87ce2a04 1018 debug_printf ("Attached to a stopped process\n");
c14d7ab2
PA
1019
1020 /* The process is definitely stopped. It is in a job control
1021 stop, unless the kernel predates the TASK_STOPPED /
1022 TASK_TRACED distinction, in which case it might be in a
1023 ptrace stop. Make sure it is in a ptrace stop; from there we
1024 can kill it, signal it, et cetera.
1025
1026 First make sure there is a pending SIGSTOP. Since we are
1027 already attached, the process can not transition from stopped
1028 to running without a PTRACE_CONT; so we know this signal will
1029 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1030 probably already in the queue (unless this kernel is old
1031 enough to use TASK_STOPPED for ptrace stops); but since
1032 SIGSTOP is not an RT signal, it can only be queued once. */
1033 kill_lwp (lwpid, SIGSTOP);
1034
1035 /* Finally, resume the stopped process. This will deliver the
1036 SIGSTOP (or a higher priority signal, just like normal
1037 PTRACE_ATTACH), which we'll catch later on. */
b8e1b30e 1038 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
c14d7ab2
PA
1039 }
1040
0d62e5e8 1041 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
0e21c1ec
DE
1042 brings it to a halt.
1043
1044 There are several cases to consider here:
1045
1046 1) gdbserver has already attached to the process and is being notified
1b3f6016 1047 of a new thread that is being created.
d50171e4
PA
1048 In this case we should ignore that SIGSTOP and resume the
1049 process. This is handled below by setting stop_expected = 1,
8336d594 1050 and the fact that add_thread sets last_resume_kind ==
d50171e4 1051 resume_continue.
0e21c1ec
DE
1052
1053 2) This is the first thread (the process thread), and we're attaching
1b3f6016
PA
1054 to it via attach_inferior.
1055 In this case we want the process thread to stop.
d50171e4
PA
1056 This is handled by having linux_attach set last_resume_kind ==
1057 resume_stop after we return.
e3deef73
LM
1058
1059 If the pid we are attaching to is also the tgid, we attach to and
1060 stop all the existing threads. Otherwise, we attach to pid and
1061 ignore any other threads in the same group as this pid.
0e21c1ec
DE
1062
1063 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1b3f6016
PA
1064 existing threads.
1065 In this case we want the thread to stop.
1066 FIXME: This case is currently not properly handled.
1067 We should wait for the SIGSTOP but don't. Things work apparently
1068 because enough time passes between when we ptrace (ATTACH) and when
1069 gdb makes the next ptrace call on the thread.
0d62e5e8
DJ
1070
1071 On the other hand, if we are currently trying to stop all threads, we
1072 should treat the new thread as if we had sent it a SIGSTOP. This works
54a0b537 1073 because we are guaranteed that the add_lwp call above added us to the
0e21c1ec
DE
1074 end of the list, and so the new thread has not yet reached
1075 wait_for_sigstop (but will). */
d50171e4 1076 new_lwp->stop_expected = 1;
0d62e5e8 1077
7ae1a6a6 1078 return 0;
95954743
PA
1079}
1080
8784d563
PA
1081/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1082 already attached. Returns true if a new LWP is found, false
1083 otherwise. */
1084
1085static int
1086attach_proc_task_lwp_callback (ptid_t ptid)
1087{
1088 /* Is this a new thread? */
1089 if (find_thread_ptid (ptid) == NULL)
1090 {
e38504b3 1091 int lwpid = ptid.lwp ();
8784d563
PA
1092 int err;
1093
1094 if (debug_threads)
1095 debug_printf ("Found new lwp %d\n", lwpid);
1096
fd000fb3 1097 err = the_linux_target->attach_lwp (ptid);
8784d563
PA
1098
1099 /* Be quiet if we simply raced with the thread exiting. EPERM
1100 is returned if the thread's task still exists, and is marked
1101 as exited or zombie, as well as other conditions, so in that
1102 case, confirm the status in /proc/PID/status. */
1103 if (err == ESRCH
1104 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1105 {
1106 if (debug_threads)
1107 {
1108 debug_printf ("Cannot attach to lwp %d: "
1109 "thread is gone (%d: %s)\n",
6d91ce9a 1110 lwpid, err, safe_strerror (err));
8784d563
PA
1111 }
1112 }
1113 else if (err != 0)
1114 {
4d9b86e1 1115 std::string reason
50fa3001 1116 = linux_ptrace_attach_fail_reason_string (ptid, err);
4d9b86e1
SM
1117
1118 warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
8784d563
PA
1119 }
1120
1121 return 1;
1122 }
1123 return 0;
1124}
1125
500c1d85
PA
1126static void async_file_mark (void);
1127
e3deef73
LM
1128/* Attach to PID. If PID is the tgid, attach to it and all
1129 of its threads. */
1130
ef03dad8
TBA
1131int
1132linux_process_target::attach (unsigned long pid)
0d62e5e8 1133{
500c1d85
PA
1134 struct process_info *proc;
1135 struct thread_info *initial_thread;
184ea2f7 1136 ptid_t ptid = ptid_t (pid, pid);
7ae1a6a6
PA
1137 int err;
1138
fd000fb3 1139 proc = add_linux_process (pid, 1);
df0da8a2 1140
e3deef73
LM
1141 /* Attach to PID. We will check for other threads
1142 soon. */
fd000fb3 1143 err = attach_lwp (ptid);
7ae1a6a6 1144 if (err != 0)
4d9b86e1 1145 {
df0da8a2 1146 remove_process (proc);
4d9b86e1 1147
50fa3001
SDJ
1148 std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
1149 error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
4d9b86e1 1150 }
7ae1a6a6 1151
500c1d85
PA
1152 /* Don't ignore the initial SIGSTOP if we just attached to this
1153 process. It will be collected by wait shortly. */
184ea2f7 1154 initial_thread = find_thread_ptid (ptid_t (pid, pid));
500c1d85 1155 initial_thread->last_resume_kind = resume_stop;
0d62e5e8 1156
8784d563
PA
1157 /* We must attach to every LWP. If /proc is mounted, use that to
1158 find them now. On the one hand, the inferior may be using raw
1159 clone instead of using pthreads. On the other hand, even if it
1160 is using pthreads, GDB may not be connected yet (thread_db needs
1161 to do symbol lookups, through qSymbol). Also, thread_db walks
1162 structures in the inferior's address space to find the list of
1163 threads/LWPs, and those structures may well be corrupted. Note
1164 that once thread_db is loaded, we'll still use it to list threads
1165 and associate pthread info with each LWP. */
1166 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
500c1d85
PA
1167
1168 /* GDB will shortly read the xml target description for this
1169 process, to figure out the process' architecture. But the target
1170 description is only filled in when the first process/thread in
1171 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1172 that now, otherwise, if GDB is fast enough, it could read the
1173 target description _before_ that initial stop. */
1174 if (non_stop)
1175 {
1176 struct lwp_info *lwp;
1177 int wstat, lwpid;
f2907e49 1178 ptid_t pid_ptid = ptid_t (pid);
500c1d85 1179
d16f3f6c 1180 lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
500c1d85
PA
1181 gdb_assert (lwpid > 0);
1182
f2907e49 1183 lwp = find_lwp_pid (ptid_t (lwpid));
500c1d85
PA
1184
1185 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1186 {
1187 lwp->status_pending_p = 1;
1188 lwp->status_pending = wstat;
1189 }
1190
1191 initial_thread->last_resume_kind = resume_continue;
1192
1193 async_file_mark ();
1194
1195 gdb_assert (proc->tdesc != NULL);
1196 }
1197
95954743
PA
1198 return 0;
1199}
1200
95954743 1201static int
e4eb0dec 1202last_thread_of_process_p (int pid)
95954743 1203{
e4eb0dec 1204 bool seen_one = false;
95954743 1205
da4ae14a 1206 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
95954743 1207 {
e4eb0dec
SM
1208 if (!seen_one)
1209 {
1210 /* This is the first thread of this process we see. */
1211 seen_one = true;
1212 return false;
1213 }
1214 else
1215 {
1216 /* This is the second thread of this process we see. */
1217 return true;
1218 }
1219 });
da6d8c04 1220
e4eb0dec 1221 return thread == NULL;
95954743
PA
1222}
1223
da84f473
PA
1224/* Kill LWP. */
1225
1226static void
1227linux_kill_one_lwp (struct lwp_info *lwp)
1228{
d86d4aaf
DE
1229 struct thread_info *thr = get_lwp_thread (lwp);
1230 int pid = lwpid_of (thr);
da84f473
PA
1231
1232 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1233 there is no signal context, and ptrace(PTRACE_KILL) (or
1234 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1235 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1236 alternative is to kill with SIGKILL. We only need one SIGKILL
1237 per process, not one for each thread. But since we still support
4a6ed09b
PA
1238 support debugging programs using raw clone without CLONE_THREAD,
1239 we send one for each thread. For years, we used PTRACE_KILL
1240 only, so we're being a bit paranoid about some old kernels where
1241 PTRACE_KILL might work better (dubious if there are any such, but
1242 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1243 second, and so we're fine everywhere. */
da84f473
PA
1244
1245 errno = 0;
69ff6be5 1246 kill_lwp (pid, SIGKILL);
da84f473 1247 if (debug_threads)
ce9e3fe7
PA
1248 {
1249 int save_errno = errno;
1250
1251 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1252 target_pid_to_str (ptid_of (thr)),
6d91ce9a 1253 save_errno ? safe_strerror (save_errno) : "OK");
ce9e3fe7 1254 }
da84f473
PA
1255
1256 errno = 0;
b8e1b30e 1257 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
da84f473 1258 if (debug_threads)
ce9e3fe7
PA
1259 {
1260 int save_errno = errno;
1261
1262 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1263 target_pid_to_str (ptid_of (thr)),
6d91ce9a 1264 save_errno ? safe_strerror (save_errno) : "OK");
ce9e3fe7 1265 }
da84f473
PA
1266}
1267
e76126e8
PA
1268/* Kill LWP and wait for it to die. */
1269
1270static void
1271kill_wait_lwp (struct lwp_info *lwp)
1272{
1273 struct thread_info *thr = get_lwp_thread (lwp);
e99b03dc 1274 int pid = ptid_of (thr).pid ();
e38504b3 1275 int lwpid = ptid_of (thr).lwp ();
e76126e8
PA
1276 int wstat;
1277 int res;
1278
1279 if (debug_threads)
1280 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1281
1282 do
1283 {
1284 linux_kill_one_lwp (lwp);
1285
1286 /* Make sure it died. Notes:
1287
1288 - The loop is most likely unnecessary.
1289
d16f3f6c 1290 - We don't use wait_for_event as that could delete lwps
e76126e8
PA
1291 while we're iterating over them. We're not interested in
1292 any pending status at this point, only in making sure all
1293 wait status on the kernel side are collected until the
1294 process is reaped.
1295
1296 - We don't use __WALL here as the __WALL emulation relies on
1297 SIGCHLD, and killing a stopped process doesn't generate
1298 one, nor an exit status.
1299 */
1300 res = my_waitpid (lwpid, &wstat, 0);
1301 if (res == -1 && errno == ECHILD)
1302 res = my_waitpid (lwpid, &wstat, __WCLONE);
1303 } while (res > 0 && WIFSTOPPED (wstat));
1304
586b02a9
PA
1305 /* Even if it was stopped, the child may have already disappeared.
1306 E.g., if it was killed by SIGKILL. */
1307 if (res < 0 && errno != ECHILD)
1308 perror_with_name ("kill_wait_lwp");
e76126e8
PA
1309}
1310
578290ec 1311/* Callback for `for_each_thread'. Kills an lwp of a given process,
da84f473 1312 except the leader. */
95954743 1313
578290ec
SM
1314static void
1315kill_one_lwp_callback (thread_info *thread, int pid)
da6d8c04 1316{
54a0b537 1317 struct lwp_info *lwp = get_thread_lwp (thread);
0d62e5e8 1318
fd500816
DJ
1319 /* We avoid killing the first thread here, because of a Linux kernel (at
1320 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1321 the children get a chance to be reaped, it will remain a zombie
1322 forever. */
95954743 1323
d86d4aaf 1324 if (lwpid_of (thread) == pid)
95954743
PA
1325 {
1326 if (debug_threads)
87ce2a04 1327 debug_printf ("lkop: is last of process %s\n",
9c80ecd6 1328 target_pid_to_str (thread->id));
578290ec 1329 return;
95954743 1330 }
fd500816 1331
e76126e8 1332 kill_wait_lwp (lwp);
da6d8c04
DJ
1333}
1334
c6885a57
TBA
1335int
1336linux_process_target::kill (process_info *process)
0d62e5e8 1337{
a780ef4f 1338 int pid = process->pid;
9d606399 1339
f9e39928
PA
1340 /* If we're killing a running inferior, make sure it is stopped
1341 first, as PTRACE_KILL will not work otherwise. */
7984d532 1342 stop_all_lwps (0, NULL);
f9e39928 1343
578290ec
SM
1344 for_each_thread (pid, [&] (thread_info *thread)
1345 {
1346 kill_one_lwp_callback (thread, pid);
1347 });
fd500816 1348
54a0b537 1349 /* See the comment in linux_kill_one_lwp. We did not kill the first
fd500816 1350 thread in the list, so do so now. */
a780ef4f 1351 lwp_info *lwp = find_lwp_pid (ptid_t (pid));
bd99dc85 1352
784867a5 1353 if (lwp == NULL)
fd500816 1354 {
784867a5 1355 if (debug_threads)
d86d4aaf
DE
1356 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1357 pid);
784867a5
JK
1358 }
1359 else
e76126e8 1360 kill_wait_lwp (lwp);
2d717e4f 1361
8adb37b9 1362 mourn (process);
f9e39928
PA
1363
1364 /* Since we presently can only stop all lwps of all processes, we
1365 need to unstop lwps of other processes. */
7984d532 1366 unstop_all_lwps (0, NULL);
95954743 1367 return 0;
0d62e5e8
DJ
1368}
1369
9b224c5e
PA
1370/* Get pending signal of THREAD, for detaching purposes. This is the
1371 signal the thread last stopped for, which we need to deliver to the
1372 thread when detaching, otherwise, it'd be suppressed/lost. */
1373
1374static int
1375get_detach_signal (struct thread_info *thread)
1376{
c12a5089 1377 client_state &cs = get_client_state ();
a493e3e2 1378 enum gdb_signal signo = GDB_SIGNAL_0;
9b224c5e
PA
1379 int status;
1380 struct lwp_info *lp = get_thread_lwp (thread);
1381
1382 if (lp->status_pending_p)
1383 status = lp->status_pending;
1384 else
1385 {
1386 /* If the thread had been suspended by gdbserver, and it stopped
1387 cleanly, then it'll have stopped with SIGSTOP. But we don't
1388 want to deliver that SIGSTOP. */
183be222
SM
1389 if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
1390 || thread->last_status.sig () == GDB_SIGNAL_0)
9b224c5e
PA
1391 return 0;
1392
1393 /* Otherwise, we may need to deliver the signal we
1394 intercepted. */
1395 status = lp->last_status;
1396 }
1397
1398 if (!WIFSTOPPED (status))
1399 {
1400 if (debug_threads)
87ce2a04 1401 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
d86d4aaf 1402 target_pid_to_str (ptid_of (thread)));
9b224c5e
PA
1403 return 0;
1404 }
1405
1406 /* Extended wait statuses aren't real SIGTRAPs. */
89a5711c 1407 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
9b224c5e
PA
1408 {
1409 if (debug_threads)
87ce2a04
DE
1410 debug_printf ("GPS: lwp %s had stopped with extended "
1411 "status: no pending signal\n",
d86d4aaf 1412 target_pid_to_str (ptid_of (thread)));
9b224c5e
PA
1413 return 0;
1414 }
1415
2ea28649 1416 signo = gdb_signal_from_host (WSTOPSIG (status));
9b224c5e 1417
c12a5089 1418 if (cs.program_signals_p && !cs.program_signals[signo])
9b224c5e
PA
1419 {
1420 if (debug_threads)
87ce2a04 1421 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
d86d4aaf 1422 target_pid_to_str (ptid_of (thread)),
87ce2a04 1423 gdb_signal_to_string (signo));
9b224c5e
PA
1424 return 0;
1425 }
c12a5089 1426 else if (!cs.program_signals_p
9b224c5e
PA
1427 /* If we have no way to know which signals GDB does not
1428 want to have passed to the program, assume
1429 SIGTRAP/SIGINT, which is GDB's default. */
a493e3e2 1430 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
9b224c5e
PA
1431 {
1432 if (debug_threads)
87ce2a04
DE
1433 debug_printf ("GPS: lwp %s had signal %s, "
1434 "but we don't know if we should pass it. "
1435 "Default to not.\n",
d86d4aaf 1436 target_pid_to_str (ptid_of (thread)),
87ce2a04 1437 gdb_signal_to_string (signo));
9b224c5e
PA
1438 return 0;
1439 }
1440 else
1441 {
1442 if (debug_threads)
87ce2a04 1443 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
d86d4aaf 1444 target_pid_to_str (ptid_of (thread)),
87ce2a04 1445 gdb_signal_to_string (signo));
9b224c5e
PA
1446
1447 return WSTOPSIG (status);
1448 }
1449}
1450
fd000fb3
TBA
1451void
1452linux_process_target::detach_one_lwp (lwp_info *lwp)
6ad8ae5c 1453{
ced2dffb 1454 struct thread_info *thread = get_lwp_thread (lwp);
9b224c5e 1455 int sig;
ced2dffb 1456 int lwpid;
6ad8ae5c 1457
9b224c5e 1458 /* If there is a pending SIGSTOP, get rid of it. */
54a0b537 1459 if (lwp->stop_expected)
ae13219e 1460 {
9b224c5e 1461 if (debug_threads)
87ce2a04 1462 debug_printf ("Sending SIGCONT to %s\n",
d86d4aaf 1463 target_pid_to_str (ptid_of (thread)));
9b224c5e 1464
d86d4aaf 1465 kill_lwp (lwpid_of (thread), SIGCONT);
54a0b537 1466 lwp->stop_expected = 0;
ae13219e
DJ
1467 }
1468
9b224c5e
PA
1469 /* Pass on any pending signal for this thread. */
1470 sig = get_detach_signal (thread);
1471
ced2dffb
PA
1472 /* Preparing to resume may try to write registers, and fail if the
1473 lwp is zombie. If that happens, ignore the error. We'll handle
1474 it below, when detach fails with ESRCH. */
a70b8144 1475 try
ced2dffb
PA
1476 {
1477 /* Flush any pending changes to the process's registers. */
1478 regcache_invalidate_thread (thread);
1479
1480 /* Finally, let it resume. */
d7599cc0 1481 low_prepare_to_resume (lwp);
ced2dffb 1482 }
230d2906 1483 catch (const gdb_exception_error &ex)
ced2dffb
PA
1484 {
1485 if (!check_ptrace_stopped_lwp_gone (lwp))
eedc3f4f 1486 throw;
ced2dffb 1487 }
ced2dffb
PA
1488
1489 lwpid = lwpid_of (thread);
1490 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
b8e1b30e 1491 (PTRACE_TYPE_ARG4) (long) sig) < 0)
ced2dffb
PA
1492 {
1493 int save_errno = errno;
1494
1495 /* We know the thread exists, so ESRCH must mean the lwp is
1496 zombie. This can happen if one of the already-detached
1497 threads exits the whole thread group. In that case we're
1498 still attached, and must reap the lwp. */
1499 if (save_errno == ESRCH)
1500 {
1501 int ret, status;
1502
1503 ret = my_waitpid (lwpid, &status, __WALL);
1504 if (ret == -1)
1505 {
1506 warning (_("Couldn't reap LWP %d while detaching: %s"),
6d91ce9a 1507 lwpid, safe_strerror (errno));
ced2dffb
PA
1508 }
1509 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1510 {
1511 warning (_("Reaping LWP %d while detaching "
1512 "returned unexpected status 0x%x"),
1513 lwpid, status);
1514 }
1515 }
1516 else
1517 {
1518 error (_("Can't detach %s: %s"),
1519 target_pid_to_str (ptid_of (thread)),
6d91ce9a 1520 safe_strerror (save_errno));
ced2dffb
PA
1521 }
1522 }
1523 else if (debug_threads)
1524 {
1525 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1526 target_pid_to_str (ptid_of (thread)),
1527 strsignal (sig));
1528 }
bd99dc85
PA
1529
1530 delete_lwp (lwp);
ced2dffb
PA
1531}
1532
9061c9cf
TBA
1533int
1534linux_process_target::detach (process_info *process)
95954743 1535{
ced2dffb 1536 struct lwp_info *main_lwp;
95954743 1537
863d01bd
PA
1538 /* As there's a step over already in progress, let it finish first,
1539 otherwise nesting a stabilize_threads operation on top gets real
1540 messy. */
1541 complete_ongoing_step_over ();
1542
f9e39928 1543 /* Stop all threads before detaching. First, ptrace requires that
30baf67b 1544 the thread is stopped to successfully detach. Second, thread_db
f9e39928
PA
1545 may need to uninstall thread event breakpoints from memory, which
1546 only works with a stopped process anyway. */
7984d532 1547 stop_all_lwps (0, NULL);
f9e39928 1548
ca5c370d 1549#ifdef USE_THREAD_DB
8336d594 1550 thread_db_detach (process);
ca5c370d
PA
1551#endif
1552
fa593d66 1553 /* Stabilize threads (move out of jump pads). */
5c9eb2f2 1554 target_stabilize_threads ();
fa593d66 1555
ced2dffb
PA
1556 /* Detach from the clone lwps first. If the thread group exits just
1557 while we're detaching, we must reap the clone lwps before we're
1558 able to reap the leader. */
fd000fb3
TBA
1559 for_each_thread (process->pid, [this] (thread_info *thread)
1560 {
1561 /* We don't actually detach from the thread group leader just yet.
1562 If the thread group exits, we must reap the zombie clone lwps
1563 before we're able to reap the leader. */
1564 if (thread->id.pid () == thread->id.lwp ())
1565 return;
1566
1567 lwp_info *lwp = get_thread_lwp (thread);
1568 detach_one_lwp (lwp);
1569 });
ced2dffb 1570
ef2ddb33 1571 main_lwp = find_lwp_pid (ptid_t (process->pid));
fd000fb3 1572 detach_one_lwp (main_lwp);
8336d594 1573
8adb37b9 1574 mourn (process);
f9e39928
PA
1575
1576 /* Since we presently can only stop all lwps of all processes, we
1577 need to unstop lwps of other processes. */
7984d532 1578 unstop_all_lwps (0, NULL);
f9e39928
PA
1579 return 0;
1580}
1581
1582/* Remove all LWPs that belong to process PROC from the lwp list. */
1583
8adb37b9
TBA
1584void
1585linux_process_target::mourn (process_info *process)
8336d594
PA
1586{
1587 struct process_info_private *priv;
1588
1589#ifdef USE_THREAD_DB
1590 thread_db_mourn (process);
1591#endif
1592
fd000fb3 1593 for_each_thread (process->pid, [this] (thread_info *thread)
6b2a85da
SM
1594 {
1595 delete_lwp (get_thread_lwp (thread));
1596 });
f9e39928 1597
8336d594 1598 /* Freeing all private data. */
fe978cb0 1599 priv = process->priv;
fd000fb3 1600 low_delete_process (priv->arch_private);
8336d594 1601 free (priv);
fe978cb0 1602 process->priv = NULL;
505106cd
PA
1603
1604 remove_process (process);
8336d594
PA
1605}
1606
95a49a39
TBA
1607void
1608linux_process_target::join (int pid)
444d6139 1609{
444d6139
PA
1610 int status, ret;
1611
1612 do {
d105de22 1613 ret = my_waitpid (pid, &status, 0);
444d6139
PA
1614 if (WIFEXITED (status) || WIFSIGNALED (status))
1615 break;
1616 } while (ret != -1 || errno != ECHILD);
1617}
1618
13d3d99b
TBA
1619/* Return true if the given thread is still alive. */
1620
1621bool
1622linux_process_target::thread_alive (ptid_t ptid)
0d62e5e8 1623{
95954743
PA
1624 struct lwp_info *lwp = find_lwp_pid (ptid);
1625
1626 /* We assume we always know if a thread exits. If a whole process
1627 exited but we still haven't been able to report it to GDB, we'll
1628 hold on to the last lwp of the dead process. */
1629 if (lwp != NULL)
00db26fa 1630 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1631 else
1632 return 0;
1633}
1634
df95181f
TBA
1635bool
1636linux_process_target::thread_still_has_status_pending (thread_info *thread)
582511be
PA
1637{
1638 struct lwp_info *lp = get_thread_lwp (thread);
1639
1640 if (!lp->status_pending_p)
1641 return 0;
1642
582511be 1643 if (thread->last_resume_kind != resume_stop
15c66dd6
PA
1644 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1645 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
582511be
PA
1646 {
1647 struct thread_info *saved_thread;
1648 CORE_ADDR pc;
1649 int discard = 0;
1650
1651 gdb_assert (lp->last_status != 0);
1652
1653 pc = get_pc (lp);
1654
1655 saved_thread = current_thread;
1656 current_thread = thread;
1657
1658 if (pc != lp->stop_pc)
1659 {
1660 if (debug_threads)
1661 debug_printf ("PC of %ld changed\n",
1662 lwpid_of (thread));
1663 discard = 1;
1664 }
3e572f71
PA
1665
1666#if !USE_SIGTRAP_SIGINFO
15c66dd6 1667 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
d7146cda 1668 && !low_breakpoint_at (pc))
582511be
PA
1669 {
1670 if (debug_threads)
1671 debug_printf ("previous SW breakpoint of %ld gone\n",
1672 lwpid_of (thread));
1673 discard = 1;
1674 }
15c66dd6 1675 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
582511be
PA
1676 && !hardware_breakpoint_inserted_here (pc))
1677 {
1678 if (debug_threads)
1679 debug_printf ("previous HW breakpoint of %ld gone\n",
1680 lwpid_of (thread));
1681 discard = 1;
1682 }
3e572f71 1683#endif
582511be
PA
1684
1685 current_thread = saved_thread;
1686
1687 if (discard)
1688 {
1689 if (debug_threads)
1690 debug_printf ("discarding pending breakpoint status\n");
1691 lp->status_pending_p = 0;
1692 return 0;
1693 }
1694 }
1695
1696 return 1;
1697}
1698
a681f9c9
PA
1699/* Returns true if LWP is resumed from the client's perspective. */
1700
1701static int
1702lwp_resumed (struct lwp_info *lwp)
1703{
1704 struct thread_info *thread = get_lwp_thread (lwp);
1705
1706 if (thread->last_resume_kind != resume_stop)
1707 return 1;
1708
1709 /* Did gdb send us a `vCont;t', but we haven't reported the
1710 corresponding stop to gdb yet? If so, the thread is still
1711 resumed/running from gdb's perspective. */
1712 if (thread->last_resume_kind == resume_stop
183be222 1713 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
a681f9c9
PA
1714 return 1;
1715
1716 return 0;
1717}
1718
df95181f
TBA
1719bool
1720linux_process_target::status_pending_p_callback (thread_info *thread,
1721 ptid_t ptid)
0d62e5e8 1722{
582511be 1723 struct lwp_info *lp = get_thread_lwp (thread);
95954743
PA
1724
1725 /* Check if we're only interested in events from a specific process
afa8d396 1726 or a specific LWP. */
83e1b6c1 1727 if (!thread->id.matches (ptid))
95954743 1728 return 0;
0d62e5e8 1729
a681f9c9
PA
1730 if (!lwp_resumed (lp))
1731 return 0;
1732
582511be 1733 if (lp->status_pending_p
df95181f 1734 && !thread_still_has_status_pending (thread))
582511be 1735 {
df95181f 1736 resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
582511be
PA
1737 return 0;
1738 }
0d62e5e8 1739
582511be 1740 return lp->status_pending_p;
0d62e5e8
DJ
1741}
1742
95954743
PA
1743struct lwp_info *
1744find_lwp_pid (ptid_t ptid)
1745{
da4ae14a 1746 thread_info *thread = find_thread ([&] (thread_info *thr_arg)
454296a2
SM
1747 {
1748 int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
da4ae14a 1749 return thr_arg->id.lwp () == lwp;
454296a2 1750 });
d86d4aaf
DE
1751
1752 if (thread == NULL)
1753 return NULL;
1754
9c80ecd6 1755 return get_thread_lwp (thread);
95954743
PA
1756}
1757
fa96cb38 1758/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1759
fa96cb38
PA
1760static int
1761num_lwps (int pid)
1762{
fa96cb38 1763 int count = 0;
0d62e5e8 1764
4d3bb80e
SM
1765 for_each_thread (pid, [&] (thread_info *thread)
1766 {
9c80ecd6 1767 count++;
4d3bb80e 1768 });
3aee8918 1769
fa96cb38
PA
1770 return count;
1771}
d61ddec4 1772
6d4ee8c6
GB
1773/* See nat/linux-nat.h. */
1774
1775struct lwp_info *
1776iterate_over_lwps (ptid_t filter,
d3a70e03 1777 gdb::function_view<iterate_over_lwps_ftype> callback)
6d4ee8c6 1778{
da4ae14a 1779 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
6d1e5673 1780 {
da4ae14a 1781 lwp_info *lwp = get_thread_lwp (thr_arg);
6d1e5673 1782
d3a70e03 1783 return callback (lwp);
6d1e5673 1784 });
6d4ee8c6 1785
9c80ecd6 1786 if (thread == NULL)
6d4ee8c6
GB
1787 return NULL;
1788
9c80ecd6 1789 return get_thread_lwp (thread);
6d4ee8c6
GB
1790}
1791
fd000fb3
TBA
1792void
1793linux_process_target::check_zombie_leaders ()
fa96cb38 1794{
fd000fb3 1795 for_each_process ([this] (process_info *proc) {
9179355e
SM
1796 pid_t leader_pid = pid_of (proc);
1797 struct lwp_info *leader_lp;
1798
f2907e49 1799 leader_lp = find_lwp_pid (ptid_t (leader_pid));
9179355e
SM
1800
1801 if (debug_threads)
1802 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1803 "num_lwps=%d, zombie=%d\n",
1804 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1805 linux_proc_pid_is_zombie (leader_pid));
1806
1807 if (leader_lp != NULL && !leader_lp->stopped
1808 /* Check if there are other threads in the group, as we may
1809 have raced with the inferior simply exiting. */
1810 && !last_thread_of_process_p (leader_pid)
1811 && linux_proc_pid_is_zombie (leader_pid))
1812 {
1813 /* A leader zombie can mean one of two things:
1814
1815 - It exited, and there's an exit status pending
1816 available, or only the leader exited (not the whole
1817 program). In the latter case, we can't waitpid the
1818 leader's exit status until all other threads are gone.
1819
1820 - There are 3 or more threads in the group, and a thread
1821 other than the leader exec'd. On an exec, the Linux
1822 kernel destroys all other threads (except the execing
1823 one) in the thread group, and resets the execing thread's
1824 tid to the tgid. No exit notification is sent for the
1825 execing thread -- from the ptracer's perspective, it
1826 appears as though the execing thread just vanishes.
1827 Until we reap all other threads except the leader and the
1828 execing thread, the leader will be zombie, and the
1829 execing thread will be in `D (disc sleep)'. As soon as
1830 all other threads are reaped, the execing thread changes
1831 it's tid to the tgid, and the previous (zombie) leader
1832 vanishes, giving place to the "new" leader. We could try
1833 distinguishing the exit and exec cases, by waiting once
1834 more, and seeing if something comes out, but it doesn't
1835 sound useful. The previous leader _does_ go away, and
1836 we'll re-add the new one once we see the exec event
1837 (which is just the same as what would happen if the
1838 previous leader did exit voluntarily before some other
1839 thread execs). */
1840
1841 if (debug_threads)
1842 debug_printf ("CZL: Thread group leader %d zombie "
1843 "(it exited, or another thread execd).\n",
1844 leader_pid);
1845
1846 delete_lwp (leader_lp);
1847 }
1848 });
fa96cb38 1849}
c3adc08c 1850
a1385b7b
SM
1851/* Callback for `find_thread'. Returns the first LWP that is not
1852 stopped. */
d50171e4 1853
a1385b7b
SM
1854static bool
1855not_stopped_callback (thread_info *thread, ptid_t filter)
fa96cb38 1856{
a1385b7b
SM
1857 if (!thread->id.matches (filter))
1858 return false;
47c0c975 1859
a1385b7b 1860 lwp_info *lwp = get_thread_lwp (thread);
fa96cb38 1861
a1385b7b 1862 return !lwp->stopped;
0d62e5e8 1863}
611cb4a5 1864
863d01bd
PA
1865/* Increment LWP's suspend count. */
1866
1867static void
1868lwp_suspended_inc (struct lwp_info *lwp)
1869{
1870 lwp->suspended++;
1871
1872 if (debug_threads && lwp->suspended > 4)
1873 {
1874 struct thread_info *thread = get_lwp_thread (lwp);
1875
1876 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1877 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1878 }
1879}
1880
1881/* Decrement LWP's suspend count. */
1882
1883static void
1884lwp_suspended_decr (struct lwp_info *lwp)
1885{
1886 lwp->suspended--;
1887
1888 if (lwp->suspended < 0)
1889 {
1890 struct thread_info *thread = get_lwp_thread (lwp);
1891
1892 internal_error (__FILE__, __LINE__,
1893 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1894 lwp->suspended);
1895 }
1896}
1897
219f2f23
PA
1898/* This function should only be called if the LWP got a SIGTRAP.
1899
1900 Handle any tracepoint steps or hits. Return true if a tracepoint
1901 event was handled, 0 otherwise. */
1902
1903static int
1904handle_tracepoints (struct lwp_info *lwp)
1905{
1906 struct thread_info *tinfo = get_lwp_thread (lwp);
1907 int tpoint_related_event = 0;
1908
582511be
PA
1909 gdb_assert (lwp->suspended == 0);
1910
7984d532
PA
1911 /* If this tracepoint hit causes a tracing stop, we'll immediately
1912 uninsert tracepoints. To do this, we temporarily pause all
1913 threads, unpatch away, and then unpause threads. We need to make
1914 sure the unpausing doesn't resume LWP too. */
863d01bd 1915 lwp_suspended_inc (lwp);
7984d532 1916
219f2f23
PA
1917 /* And we need to be sure that any all-threads-stopping doesn't try
1918 to move threads out of the jump pads, as it could deadlock the
1919 inferior (LWP could be in the jump pad, maybe even holding the
1920 lock.) */
1921
1922 /* Do any necessary step collect actions. */
1923 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1924
fa593d66
PA
1925 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1926
219f2f23
PA
1927 /* See if we just hit a tracepoint and do its main collect
1928 actions. */
1929 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1930
863d01bd 1931 lwp_suspended_decr (lwp);
7984d532
PA
1932
1933 gdb_assert (lwp->suspended == 0);
229d26fc
SM
1934 gdb_assert (!stabilizing_threads
1935 || (lwp->collecting_fast_tracepoint
1936 != fast_tpoint_collect_result::not_collecting));
7984d532 1937
219f2f23
PA
1938 if (tpoint_related_event)
1939 {
1940 if (debug_threads)
87ce2a04 1941 debug_printf ("got a tracepoint event\n");
219f2f23
PA
1942 return 1;
1943 }
1944
1945 return 0;
1946}
1947
13e567af
TBA
1948fast_tpoint_collect_result
1949linux_process_target::linux_fast_tracepoint_collecting
1950 (lwp_info *lwp, fast_tpoint_collect_status *status)
fa593d66
PA
1951{
1952 CORE_ADDR thread_area;
d86d4aaf 1953 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66 1954
fa593d66
PA
1955 /* Get the thread area address. This is used to recognize which
1956 thread is which when tracing with the in-process agent library.
1957 We don't read anything from the address, and treat it as opaque;
1958 it's the address itself that we assume is unique per-thread. */
13e567af 1959 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
229d26fc 1960 return fast_tpoint_collect_result::not_collecting;
fa593d66
PA
1961
1962 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1963}
1964
13e567af
TBA
1965int
1966linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
1967{
1968 return -1;
1969}
1970
d16f3f6c
TBA
1971bool
1972linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
fa593d66 1973{
0bfdf32f 1974 struct thread_info *saved_thread;
fa593d66 1975
0bfdf32f
GB
1976 saved_thread = current_thread;
1977 current_thread = get_lwp_thread (lwp);
fa593d66
PA
1978
1979 if ((wstat == NULL
1980 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1981 && supports_fast_tracepoints ()
58b4daa5 1982 && agent_loaded_p ())
fa593d66
PA
1983 {
1984 struct fast_tpoint_collect_status status;
fa593d66
PA
1985
1986 if (debug_threads)
87ce2a04
DE
1987 debug_printf ("Checking whether LWP %ld needs to move out of the "
1988 "jump pad.\n",
0bfdf32f 1989 lwpid_of (current_thread));
fa593d66 1990
229d26fc
SM
1991 fast_tpoint_collect_result r
1992 = linux_fast_tracepoint_collecting (lwp, &status);
fa593d66
PA
1993
1994 if (wstat == NULL
1995 || (WSTOPSIG (*wstat) != SIGILL
1996 && WSTOPSIG (*wstat) != SIGFPE
1997 && WSTOPSIG (*wstat) != SIGSEGV
1998 && WSTOPSIG (*wstat) != SIGBUS))
1999 {
2000 lwp->collecting_fast_tracepoint = r;
2001
229d26fc 2002 if (r != fast_tpoint_collect_result::not_collecting)
fa593d66 2003 {
229d26fc
SM
2004 if (r == fast_tpoint_collect_result::before_insn
2005 && lwp->exit_jump_pad_bkpt == NULL)
fa593d66
PA
2006 {
2007 /* Haven't executed the original instruction yet.
2008 Set breakpoint there, and wait till it's hit,
2009 then single-step until exiting the jump pad. */
2010 lwp->exit_jump_pad_bkpt
2011 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2012 }
2013
2014 if (debug_threads)
87ce2a04
DE
2015 debug_printf ("Checking whether LWP %ld needs to move out of "
2016 "the jump pad...it does\n",
0bfdf32f
GB
2017 lwpid_of (current_thread));
2018 current_thread = saved_thread;
fa593d66 2019
d16f3f6c 2020 return true;
fa593d66
PA
2021 }
2022 }
2023 else
2024 {
2025 /* If we get a synchronous signal while collecting, *and*
2026 while executing the (relocated) original instruction,
2027 reset the PC to point at the tpoint address, before
2028 reporting to GDB. Otherwise, it's an IPA lib bug: just
2029 report the signal to GDB, and pray for the best. */
2030
229d26fc
SM
2031 lwp->collecting_fast_tracepoint
2032 = fast_tpoint_collect_result::not_collecting;
fa593d66 2033
229d26fc 2034 if (r != fast_tpoint_collect_result::not_collecting
fa593d66
PA
2035 && (status.adjusted_insn_addr <= lwp->stop_pc
2036 && lwp->stop_pc < status.adjusted_insn_addr_end))
2037 {
2038 siginfo_t info;
2039 struct regcache *regcache;
2040
2041 /* The si_addr on a few signals references the address
2042 of the faulting instruction. Adjust that as
2043 well. */
2044 if ((WSTOPSIG (*wstat) == SIGILL
2045 || WSTOPSIG (*wstat) == SIGFPE
2046 || WSTOPSIG (*wstat) == SIGBUS
2047 || WSTOPSIG (*wstat) == SIGSEGV)
0bfdf32f 2048 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 2049 (PTRACE_TYPE_ARG3) 0, &info) == 0
fa593d66
PA
2050 /* Final check just to make sure we don't clobber
2051 the siginfo of non-kernel-sent signals. */
2052 && (uintptr_t) info.si_addr == lwp->stop_pc)
2053 {
2054 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
0bfdf32f 2055 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
b8e1b30e 2056 (PTRACE_TYPE_ARG3) 0, &info);
fa593d66
PA
2057 }
2058
0bfdf32f 2059 regcache = get_thread_regcache (current_thread, 1);
bf9ae9d8 2060 low_set_pc (regcache, status.tpoint_addr);
fa593d66
PA
2061 lwp->stop_pc = status.tpoint_addr;
2062
2063 /* Cancel any fast tracepoint lock this thread was
2064 holding. */
2065 force_unlock_trace_buffer ();
2066 }
2067
2068 if (lwp->exit_jump_pad_bkpt != NULL)
2069 {
2070 if (debug_threads)
87ce2a04
DE
2071 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2072 "stopping all threads momentarily.\n");
fa593d66
PA
2073
2074 stop_all_lwps (1, lwp);
fa593d66
PA
2075
2076 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2077 lwp->exit_jump_pad_bkpt = NULL;
2078
2079 unstop_all_lwps (1, lwp);
2080
2081 gdb_assert (lwp->suspended >= 0);
2082 }
2083 }
2084 }
2085
2086 if (debug_threads)
87ce2a04
DE
2087 debug_printf ("Checking whether LWP %ld needs to move out of the "
2088 "jump pad...no\n",
0bfdf32f 2089 lwpid_of (current_thread));
0cccb683 2090
0bfdf32f 2091 current_thread = saved_thread;
d16f3f6c 2092 return false;
fa593d66
PA
2093}
2094
2095/* Enqueue one signal in the "signals to report later when out of the
2096 jump pad" list. */
2097
2098static void
2099enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2100{
d86d4aaf 2101 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
2102
2103 if (debug_threads)
87ce2a04 2104 debug_printf ("Deferring signal %d for LWP %ld.\n",
d86d4aaf 2105 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2106
2107 if (debug_threads)
2108 {
013e3554 2109 for (const auto &sig : lwp->pending_signals_to_report)
87ce2a04 2110 debug_printf (" Already queued %d\n",
013e3554 2111 sig.signal);
fa593d66 2112
87ce2a04 2113 debug_printf (" (no more currently queued signals)\n");
fa593d66
PA
2114 }
2115
1a981360
PA
2116 /* Don't enqueue non-RT signals if they are already in the deferred
2117 queue. (SIGSTOP being the easiest signal to see ending up here
2118 twice) */
2119 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2120 {
013e3554 2121 for (const auto &sig : lwp->pending_signals_to_report)
1a981360 2122 {
013e3554 2123 if (sig.signal == WSTOPSIG (*wstat))
1a981360
PA
2124 {
2125 if (debug_threads)
87ce2a04
DE
2126 debug_printf ("Not requeuing already queued non-RT signal %d"
2127 " for LWP %ld\n",
013e3554 2128 sig.signal,
d86d4aaf 2129 lwpid_of (thread));
1a981360
PA
2130 return;
2131 }
2132 }
2133 }
2134
013e3554 2135 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
8d749320 2136
d86d4aaf 2137 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
013e3554 2138 &lwp->pending_signals_to_report.back ().info);
fa593d66
PA
2139}
2140
2141/* Dequeue one signal from the "signals to report later when out of
2142 the jump pad" list. */
2143
2144static int
2145dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2146{
d86d4aaf
DE
2147 struct thread_info *thread = get_lwp_thread (lwp);
2148
013e3554 2149 if (!lwp->pending_signals_to_report.empty ())
fa593d66 2150 {
013e3554 2151 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
fa593d66 2152
013e3554
TBA
2153 *wstat = W_STOPCODE (p_sig.signal);
2154 if (p_sig.info.si_signo != 0)
d86d4aaf 2155 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
013e3554
TBA
2156 &p_sig.info);
2157
2158 lwp->pending_signals_to_report.pop_front ();
fa593d66
PA
2159
2160 if (debug_threads)
87ce2a04 2161 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
d86d4aaf 2162 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2163
2164 if (debug_threads)
2165 {
013e3554 2166 for (const auto &sig : lwp->pending_signals_to_report)
87ce2a04 2167 debug_printf (" Still queued %d\n",
013e3554 2168 sig.signal);
fa593d66 2169
87ce2a04 2170 debug_printf (" (no more queued signals)\n");
fa593d66
PA
2171 }
2172
2173 return 1;
2174 }
2175
2176 return 0;
2177}
2178
ac1bbaca
TBA
2179bool
2180linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
582511be 2181{
ac1bbaca
TBA
2182 struct thread_info *saved_thread = current_thread;
2183 current_thread = get_lwp_thread (child);
d50171e4 2184
ac1bbaca
TBA
2185 if (low_stopped_by_watchpoint ())
2186 {
2187 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2188 child->stopped_data_address = low_stopped_data_address ();
2189 }
582511be 2190
ac1bbaca 2191 current_thread = saved_thread;
582511be 2192
ac1bbaca
TBA
2193 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2194}
d50171e4 2195
ac1bbaca
TBA
2196bool
2197linux_process_target::low_stopped_by_watchpoint ()
2198{
2199 return false;
2200}
d50171e4 2201
ac1bbaca
TBA
2202CORE_ADDR
2203linux_process_target::low_stopped_data_address ()
2204{
2205 return 0;
c4d9ceb6
YQ
2206}
2207
de0d863e
DB
2208/* Return the ptrace options that we want to try to enable. */
2209
2210static int
2211linux_low_ptrace_options (int attached)
2212{
c12a5089 2213 client_state &cs = get_client_state ();
de0d863e
DB
2214 int options = 0;
2215
2216 if (!attached)
2217 options |= PTRACE_O_EXITKILL;
2218
c12a5089 2219 if (cs.report_fork_events)
de0d863e
DB
2220 options |= PTRACE_O_TRACEFORK;
2221
c12a5089 2222 if (cs.report_vfork_events)
c269dbdb
DB
2223 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2224
c12a5089 2225 if (cs.report_exec_events)
94585166
DB
2226 options |= PTRACE_O_TRACEEXEC;
2227
82075af2
JS
2228 options |= PTRACE_O_TRACESYSGOOD;
2229
de0d863e
DB
2230 return options;
2231}
2232
1a48f002 2233void
d16f3f6c 2234linux_process_target::filter_event (int lwpid, int wstat)
fa96cb38 2235{
c12a5089 2236 client_state &cs = get_client_state ();
fa96cb38
PA
2237 struct lwp_info *child;
2238 struct thread_info *thread;
582511be 2239 int have_stop_pc = 0;
fa96cb38 2240
f2907e49 2241 child = find_lwp_pid (ptid_t (lwpid));
fa96cb38 2242
94585166
DB
2243 /* Check for stop events reported by a process we didn't already
2244 know about - anything not already in our LWP list.
2245
2246 If we're expecting to receive stopped processes after
2247 fork, vfork, and clone events, then we'll just add the
2248 new one to our list and go back to waiting for the event
2249 to be reported - the stopped process might be returned
2250 from waitpid before or after the event is.
2251
2252 But note the case of a non-leader thread exec'ing after the
2253 leader having exited, and gone from our lists (because
2254 check_zombie_leaders deleted it). The non-leader thread
2255 changes its tid to the tgid. */
2256
2257 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2258 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2259 {
2260 ptid_t child_ptid;
2261
2262 /* A multi-thread exec after we had seen the leader exiting. */
2263 if (debug_threads)
2264 {
2265 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2266 "after exec.\n", lwpid);
2267 }
2268
184ea2f7 2269 child_ptid = ptid_t (lwpid, lwpid);
94585166
DB
2270 child = add_lwp (child_ptid);
2271 child->stopped = 1;
2272 current_thread = child->thread;
2273 }
2274
fa96cb38
PA
2275 /* If we didn't find a process, one of two things presumably happened:
2276 - A process we started and then detached from has exited. Ignore it.
2277 - A process we are controlling has forked and the new child's stop
2278 was reported to us by the kernel. Save its PID. */
2279 if (child == NULL && WIFSTOPPED (wstat))
2280 {
2281 add_to_pid_list (&stopped_pids, lwpid, wstat);
1a48f002 2282 return;
fa96cb38
PA
2283 }
2284 else if (child == NULL)
1a48f002 2285 return;
fa96cb38
PA
2286
2287 thread = get_lwp_thread (child);
2288
2289 child->stopped = 1;
2290
2291 child->last_status = wstat;
2292
582511be
PA
2293 /* Check if the thread has exited. */
2294 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2295 {
2296 if (debug_threads)
2297 debug_printf ("LLFE: %d exited.\n", lwpid);
f50bf8e5
YQ
2298
2299 if (finish_step_over (child))
2300 {
2301 /* Unsuspend all other LWPs, and set them back running again. */
2302 unsuspend_all_lwps (child);
2303 }
2304
65706a29
PA
2305 /* If there is at least one more LWP, then the exit signal was
2306 not the end of the debugged application and should be
2307 ignored, unless GDB wants to hear about thread exits. */
c12a5089 2308 if (cs.report_thread_events
65706a29 2309 || last_thread_of_process_p (pid_of (thread)))
582511be 2310 {
65706a29
PA
2311 /* Since events are serialized to GDB core, and we can't
2312 report this one right now. Leave the status pending for
2313 the next time we're able to report it. */
2314 mark_lwp_dead (child, wstat);
1a48f002 2315 return;
582511be
PA
2316 }
2317 else
2318 {
65706a29 2319 delete_lwp (child);
1a48f002 2320 return;
582511be
PA
2321 }
2322 }
2323
2324 gdb_assert (WIFSTOPPED (wstat));
2325
fa96cb38
PA
2326 if (WIFSTOPPED (wstat))
2327 {
2328 struct process_info *proc;
2329
c06cbd92 2330 /* Architecture-specific setup after inferior is running. */
fa96cb38 2331 proc = find_process_pid (pid_of (thread));
c06cbd92 2332 if (proc->tdesc == NULL)
fa96cb38 2333 {
c06cbd92
YQ
2334 if (proc->attached)
2335 {
c06cbd92
YQ
2336 /* This needs to happen after we have attached to the
2337 inferior and it is stopped for the first time, but
2338 before we access any inferior registers. */
797bcff5 2339 arch_setup_thread (thread);
c06cbd92
YQ
2340 }
2341 else
2342 {
2343 /* The process is started, but GDBserver will do
2344 architecture-specific setup after the program stops at
2345 the first instruction. */
2346 child->status_pending_p = 1;
2347 child->status_pending = wstat;
1a48f002 2348 return;
c06cbd92 2349 }
fa96cb38
PA
2350 }
2351 }
2352
fa96cb38
PA
2353 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2354 {
beed38b8 2355 struct process_info *proc = find_process_pid (pid_of (thread));
de0d863e 2356 int options = linux_low_ptrace_options (proc->attached);
beed38b8 2357
de0d863e 2358 linux_enable_event_reporting (lwpid, options);
fa96cb38
PA
2359 child->must_set_ptrace_flags = 0;
2360 }
2361
82075af2
JS
2362 /* Always update syscall_state, even if it will be filtered later. */
2363 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2364 {
2365 child->syscall_state
2366 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2367 ? TARGET_WAITKIND_SYSCALL_RETURN
2368 : TARGET_WAITKIND_SYSCALL_ENTRY);
2369 }
2370 else
2371 {
2372 /* Almost all other ptrace-stops are known to be outside of system
2373 calls, with further exceptions in handle_extended_wait. */
2374 child->syscall_state = TARGET_WAITKIND_IGNORE;
2375 }
2376
e7ad2f14
PA
2377 /* Be careful to not overwrite stop_pc until save_stop_reason is
2378 called. */
fa96cb38 2379 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
89a5711c 2380 && linux_is_extended_waitstatus (wstat))
fa96cb38 2381 {
582511be 2382 child->stop_pc = get_pc (child);
94585166 2383 if (handle_extended_wait (&child, wstat))
de0d863e
DB
2384 {
2385 /* The event has been handled, so just return without
2386 reporting it. */
1a48f002 2387 return;
de0d863e 2388 }
fa96cb38
PA
2389 }
2390
80aea927 2391 if (linux_wstatus_maybe_breakpoint (wstat))
582511be 2392 {
e7ad2f14 2393 if (save_stop_reason (child))
582511be
PA
2394 have_stop_pc = 1;
2395 }
2396
2397 if (!have_stop_pc)
2398 child->stop_pc = get_pc (child);
2399
fa96cb38
PA
2400 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2401 && child->stop_expected)
2402 {
2403 if (debug_threads)
2404 debug_printf ("Expected stop.\n");
2405 child->stop_expected = 0;
2406
2407 if (thread->last_resume_kind == resume_stop)
2408 {
2409 /* We want to report the stop to the core. Treat the
2410 SIGSTOP as a normal event. */
2bf6fb9d
PA
2411 if (debug_threads)
2412 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2413 target_pid_to_str (ptid_of (thread)));
fa96cb38
PA
2414 }
2415 else if (stopping_threads != NOT_STOPPING_THREADS)
2416 {
2417 /* Stopping threads. We don't want this SIGSTOP to end up
582511be 2418 pending. */
2bf6fb9d
PA
2419 if (debug_threads)
2420 debug_printf ("LLW: SIGSTOP caught for %s "
2421 "while stopping threads.\n",
2422 target_pid_to_str (ptid_of (thread)));
1a48f002 2423 return;
fa96cb38
PA
2424 }
2425 else
2426 {
2bf6fb9d
PA
2427 /* This is a delayed SIGSTOP. Filter out the event. */
2428 if (debug_threads)
2429 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2430 child->stepping ? "step" : "continue",
2431 target_pid_to_str (ptid_of (thread)));
2432
df95181f 2433 resume_one_lwp (child, child->stepping, 0, NULL);
1a48f002 2434 return;
fa96cb38
PA
2435 }
2436 }
2437
582511be
PA
2438 child->status_pending_p = 1;
2439 child->status_pending = wstat;
1a48f002 2440 return;
fa96cb38
PA
2441}
2442
b31cdfa6
TBA
2443bool
2444linux_process_target::maybe_hw_step (thread_info *thread)
f79b145d 2445{
b31cdfa6
TBA
2446 if (supports_hardware_single_step ())
2447 return true;
f79b145d
YQ
2448 else
2449 {
3b9a79ef 2450 /* GDBserver must insert single-step breakpoint for software
f79b145d 2451 single step. */
3b9a79ef 2452 gdb_assert (has_single_step_breakpoints (thread));
b31cdfa6 2453 return false;
f79b145d
YQ
2454 }
2455}
2456
df95181f
TBA
2457void
2458linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
20ba1ce6 2459{
20ba1ce6
PA
2460 struct lwp_info *lp = get_thread_lwp (thread);
2461
2462 if (lp->stopped
863d01bd 2463 && !lp->suspended
20ba1ce6 2464 && !lp->status_pending_p
183be222 2465 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
20ba1ce6 2466 {
8901d193
YQ
2467 int step = 0;
2468
2469 if (thread->last_resume_kind == resume_step)
2470 step = maybe_hw_step (thread);
20ba1ce6
PA
2471
2472 if (debug_threads)
2473 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2474 target_pid_to_str (ptid_of (thread)),
2475 paddress (lp->stop_pc),
2476 step);
2477
df95181f 2478 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
20ba1ce6
PA
2479 }
2480}
2481
d16f3f6c
TBA
2482int
2483linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2484 ptid_t filter_ptid,
2485 int *wstatp, int options)
0d62e5e8 2486{
d86d4aaf 2487 struct thread_info *event_thread;
d50171e4 2488 struct lwp_info *event_child, *requested_child;
fa96cb38 2489 sigset_t block_mask, prev_mask;
d50171e4 2490
fa96cb38 2491 retry:
d86d4aaf
DE
2492 /* N.B. event_thread points to the thread_info struct that contains
2493 event_child. Keep them in sync. */
2494 event_thread = NULL;
d50171e4
PA
2495 event_child = NULL;
2496 requested_child = NULL;
0d62e5e8 2497
95954743 2498 /* Check for a lwp with a pending status. */
bd99dc85 2499
d7e15655 2500 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
0d62e5e8 2501 {
83e1b6c1
SM
2502 event_thread = find_thread_in_random ([&] (thread_info *thread)
2503 {
2504 return status_pending_p_callback (thread, filter_ptid);
2505 });
2506
d86d4aaf
DE
2507 if (event_thread != NULL)
2508 event_child = get_thread_lwp (event_thread);
2509 if (debug_threads && event_thread)
2510 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
0d62e5e8 2511 }
d7e15655 2512 else if (filter_ptid != null_ptid)
0d62e5e8 2513 {
fa96cb38 2514 requested_child = find_lwp_pid (filter_ptid);
d50171e4 2515
bde24c0a 2516 if (stopping_threads == NOT_STOPPING_THREADS
fa593d66 2517 && requested_child->status_pending_p
229d26fc
SM
2518 && (requested_child->collecting_fast_tracepoint
2519 != fast_tpoint_collect_result::not_collecting))
fa593d66
PA
2520 {
2521 enqueue_one_deferred_signal (requested_child,
2522 &requested_child->status_pending);
2523 requested_child->status_pending_p = 0;
2524 requested_child->status_pending = 0;
df95181f 2525 resume_one_lwp (requested_child, 0, 0, NULL);
fa593d66
PA
2526 }
2527
2528 if (requested_child->suspended
2529 && requested_child->status_pending_p)
38e08fca
GB
2530 {
2531 internal_error (__FILE__, __LINE__,
2532 "requesting an event out of a"
2533 " suspended child?");
2534 }
fa593d66 2535
d50171e4 2536 if (requested_child->status_pending_p)
d86d4aaf
DE
2537 {
2538 event_child = requested_child;
2539 event_thread = get_lwp_thread (event_child);
2540 }
0d62e5e8 2541 }
611cb4a5 2542
0d62e5e8
DJ
2543 if (event_child != NULL)
2544 {
bd99dc85 2545 if (debug_threads)
87ce2a04 2546 debug_printf ("Got an event from pending child %ld (%04x)\n",
d86d4aaf 2547 lwpid_of (event_thread), event_child->status_pending);
fa96cb38 2548 *wstatp = event_child->status_pending;
bd99dc85
PA
2549 event_child->status_pending_p = 0;
2550 event_child->status_pending = 0;
0bfdf32f 2551 current_thread = event_thread;
d86d4aaf 2552 return lwpid_of (event_thread);
0d62e5e8
DJ
2553 }
2554
fa96cb38
PA
2555 /* But if we don't find a pending event, we'll have to wait.
2556
2557 We only enter this loop if no process has a pending wait status.
2558 Thus any action taken in response to a wait status inside this
2559 loop is responding as soon as we detect the status, not after any
2560 pending events. */
d8301ad1 2561
fa96cb38
PA
2562 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2563 all signals while here. */
2564 sigfillset (&block_mask);
21987b9c 2565 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
fa96cb38 2566
582511be
PA
2567 /* Always pull all events out of the kernel. We'll randomly select
2568 an event LWP out of all that have events, to prevent
2569 starvation. */
fa96cb38 2570 while (event_child == NULL)
0d62e5e8 2571 {
fa96cb38 2572 pid_t ret = 0;
0d62e5e8 2573
fa96cb38
PA
2574 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2575 quirks:
0d62e5e8 2576
fa96cb38
PA
2577 - If the thread group leader exits while other threads in the
2578 thread group still exist, waitpid(TGID, ...) hangs. That
2579 waitpid won't return an exit status until the other threads
2580 in the group are reaped.
611cb4a5 2581
fa96cb38
PA
2582 - When a non-leader thread execs, that thread just vanishes
2583 without reporting an exit (so we'd hang if we waited for it
2584 explicitly in that case). The exec event is reported to
94585166 2585 the TGID pid. */
fa96cb38
PA
2586 errno = 0;
2587 ret = my_waitpid (-1, wstatp, options | WNOHANG);
d8301ad1 2588
fa96cb38
PA
2589 if (debug_threads)
2590 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
6d91ce9a 2591 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
0d62e5e8 2592
fa96cb38 2593 if (ret > 0)
0d62e5e8 2594 {
89be2091 2595 if (debug_threads)
bd99dc85 2596 {
fa96cb38 2597 debug_printf ("LLW: waitpid %ld received %s\n",
8d06918f 2598 (long) ret, status_to_str (*wstatp).c_str ());
bd99dc85 2599 }
89be2091 2600
582511be
PA
2601 /* Filter all events. IOW, leave all events pending. We'll
2602 randomly select an event LWP out of all that have events
2603 below. */
d16f3f6c 2604 filter_event (ret, *wstatp);
fa96cb38
PA
2605 /* Retry until nothing comes out of waitpid. A single
2606 SIGCHLD can indicate more than one child stopped. */
89be2091
DJ
2607 continue;
2608 }
2609
20ba1ce6
PA
2610 /* Now that we've pulled all events out of the kernel, resume
2611 LWPs that don't have an interesting event to report. */
2612 if (stopping_threads == NOT_STOPPING_THREADS)
df95181f
TBA
2613 for_each_thread ([this] (thread_info *thread)
2614 {
2615 resume_stopped_resumed_lwps (thread);
2616 });
20ba1ce6
PA
2617
2618 /* ... and find an LWP with a status to report to the core, if
2619 any. */
83e1b6c1
SM
2620 event_thread = find_thread_in_random ([&] (thread_info *thread)
2621 {
2622 return status_pending_p_callback (thread, filter_ptid);
2623 });
2624
582511be
PA
2625 if (event_thread != NULL)
2626 {
2627 event_child = get_thread_lwp (event_thread);
2628 *wstatp = event_child->status_pending;
2629 event_child->status_pending_p = 0;
2630 event_child->status_pending = 0;
2631 break;
2632 }
2633
fa96cb38
PA
2634 /* Check for zombie thread group leaders. Those can't be reaped
2635 until all other threads in the thread group are. */
2636 check_zombie_leaders ();
2637
a1385b7b
SM
2638 auto not_stopped = [&] (thread_info *thread)
2639 {
2640 return not_stopped_callback (thread, wait_ptid);
2641 };
2642
fa96cb38
PA
2643 /* If there are no resumed children left in the set of LWPs we
2644 want to wait for, bail. We can't just block in
2645 waitpid/sigsuspend, because lwps might have been left stopped
2646 in trace-stop state, and we'd be stuck forever waiting for
2647 their status to change (which would only happen if we resumed
2648 them). Even if WNOHANG is set, this return code is preferred
2649 over 0 (below), as it is more detailed. */
a1385b7b 2650 if (find_thread (not_stopped) == NULL)
a6dbe5df 2651 {
fa96cb38
PA
2652 if (debug_threads)
2653 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
21987b9c 2654 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
fa96cb38 2655 return -1;
a6dbe5df
PA
2656 }
2657
fa96cb38
PA
2658 /* No interesting event to report to the caller. */
2659 if ((options & WNOHANG))
24a09b5f 2660 {
fa96cb38
PA
2661 if (debug_threads)
2662 debug_printf ("WNOHANG set, no event found\n");
2663
21987b9c 2664 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
fa96cb38 2665 return 0;
24a09b5f
DJ
2666 }
2667
fa96cb38
PA
2668 /* Block until we get an event reported with SIGCHLD. */
2669 if (debug_threads)
2670 debug_printf ("sigsuspend'ing\n");
d50171e4 2671
fa96cb38 2672 sigsuspend (&prev_mask);
21987b9c 2673 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
fa96cb38
PA
2674 goto retry;
2675 }
d50171e4 2676
21987b9c 2677 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
d50171e4 2678
0bfdf32f 2679 current_thread = event_thread;
d50171e4 2680
fa96cb38
PA
2681 return lwpid_of (event_thread);
2682}
2683
d16f3f6c
TBA
2684int
2685linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
fa96cb38 2686{
d16f3f6c 2687 return wait_for_event_filtered (ptid, ptid, wstatp, options);
611cb4a5
DJ
2688}
2689
6bf5e0ba
PA
2690/* Select one LWP out of those that have events pending. */
2691
2692static void
2693select_event_lwp (struct lwp_info **orig_lp)
2694{
582511be
PA
2695 struct thread_info *event_thread = NULL;
2696
2697 /* In all-stop, give preference to the LWP that is being
2698 single-stepped. There will be at most one, and it's the LWP that
2699 the core is most interested in. If we didn't do this, then we'd
2700 have to handle pending step SIGTRAPs somehow in case the core
2701 later continues the previously-stepped thread, otherwise we'd
2702 report the pending SIGTRAP, and the core, not having stepped the
2703 thread, wouldn't understand what the trap was for, and therefore
2704 would report it to the user as a random signal. */
2705 if (!non_stop)
6bf5e0ba 2706 {
39a64da5
SM
2707 event_thread = find_thread ([] (thread_info *thread)
2708 {
2709 lwp_info *lp = get_thread_lwp (thread);
2710
183be222 2711 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
39a64da5
SM
2712 && thread->last_resume_kind == resume_step
2713 && lp->status_pending_p);
2714 });
2715
582511be
PA
2716 if (event_thread != NULL)
2717 {
2718 if (debug_threads)
2719 debug_printf ("SEL: Select single-step %s\n",
2720 target_pid_to_str (ptid_of (event_thread)));
2721 }
6bf5e0ba 2722 }
582511be 2723 if (event_thread == NULL)
6bf5e0ba
PA
2724 {
2725 /* No single-stepping LWP. Select one at random, out of those
dda83cd7 2726 which have had events. */
6bf5e0ba 2727
b0319eaa 2728 event_thread = find_thread_in_random ([&] (thread_info *thread)
39a64da5
SM
2729 {
2730 lwp_info *lp = get_thread_lwp (thread);
2731
b0319eaa 2732 /* Only resumed LWPs that have an event pending. */
183be222 2733 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
b0319eaa 2734 && lp->status_pending_p);
39a64da5 2735 });
6bf5e0ba
PA
2736 }
2737
d86d4aaf 2738 if (event_thread != NULL)
6bf5e0ba 2739 {
d86d4aaf
DE
2740 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2741
6bf5e0ba
PA
2742 /* Switch the event LWP. */
2743 *orig_lp = event_lp;
2744 }
2745}
2746
7984d532
PA
2747/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2748 NULL. */
2749
2750static void
2751unsuspend_all_lwps (struct lwp_info *except)
2752{
139720c5
SM
2753 for_each_thread ([&] (thread_info *thread)
2754 {
2755 lwp_info *lwp = get_thread_lwp (thread);
2756
2757 if (lwp != except)
2758 lwp_suspended_decr (lwp);
2759 });
7984d532
PA
2760}
2761
5a6b0a41 2762static bool lwp_running (thread_info *thread);
fa593d66
PA
2763
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

   - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

   - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

void
linux_process_target::stabilize_threads ()
{
  /* If any thread is blocked in a spot we cannot safely move it out
     of, give up immediately rather than deadlocking below.  */
  thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
			      {
				return stuck_in_jump_pad (thread);
			      });

  if (thread_stuck != NULL)
    {
      if (debug_threads)
	debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
		      lwpid_of (thread_stuck));
      return;
    }

  /* Save the selected thread; the wait loop below changes it.  */
  thread_info *saved_thread = current_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread ([this] (thread_info *thread)
		   {
		     move_out_of_jump_pad (thread);
		   });

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait event loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  */
	  lwp_suspended_inc (lwp);

	  /* Preserve any real signal the thread stopped with, so it
	     is redelivered once the threads are resumed.  */
	  if (ourstatus.sig () != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  current_thread = saved_thread;

  if (debug_threads)
    {
      /* Double-check that the stabilization actually worked.  */
      thread_stuck = find_thread ([this] (thread_info *thread)
		     {
		       return stuck_in_jump_pad (thread);
		     });

      if (thread_stuck != NULL)
	debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
		      lwpid_of (thread_stuck));
    }
}
2866
582511be
PA
2867/* Convenience function that is called when the kernel reports an
2868 event that is not passed out to GDB. */
2869
2870static ptid_t
2871ignore_event (struct target_waitstatus *ourstatus)
2872{
2873 /* If we got an event, there may still be others, as a single
2874 SIGCHLD can indicate more than one child stopped. This forces
2875 another target_wait call. */
2876 async_file_mark ();
2877
183be222 2878 ourstatus->set_ignore ();
582511be
PA
2879 return null_ptid;
2880}
2881
fd000fb3
TBA
2882ptid_t
2883linux_process_target::filter_exit_event (lwp_info *event_child,
2884 target_waitstatus *ourstatus)
65706a29 2885{
c12a5089 2886 client_state &cs = get_client_state ();
65706a29
PA
2887 struct thread_info *thread = get_lwp_thread (event_child);
2888 ptid_t ptid = ptid_of (thread);
2889
2890 if (!last_thread_of_process_p (pid_of (thread)))
2891 {
c12a5089 2892 if (cs.report_thread_events)
183be222 2893 ourstatus->set_thread_exited (0);
65706a29 2894 else
183be222 2895 ourstatus->set_ignore ();
65706a29
PA
2896
2897 delete_lwp (event_child);
2898 }
2899 return ptid;
2900}
2901
82075af2
JS
2902/* Returns 1 if GDB is interested in any event_child syscalls. */
2903
2904static int
2905gdb_catching_syscalls_p (struct lwp_info *event_child)
2906{
2907 struct thread_info *thread = get_lwp_thread (event_child);
2908 struct process_info *proc = get_thread_process (thread);
2909
f27866ba 2910 return !proc->syscalls_to_catch.empty ();
82075af2
JS
2911}
2912
9eedd27d
TBA
2913bool
2914linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
82075af2 2915{
4cc32bec 2916 int sysno;
82075af2
JS
2917 struct thread_info *thread = get_lwp_thread (event_child);
2918 struct process_info *proc = get_thread_process (thread);
2919
f27866ba 2920 if (proc->syscalls_to_catch.empty ())
9eedd27d 2921 return false;
82075af2 2922
f27866ba 2923 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
9eedd27d 2924 return true;
82075af2 2925
4cc32bec 2926 get_syscall_trapinfo (event_child, &sysno);
f27866ba
SM
2927
2928 for (int iter : proc->syscalls_to_catch)
82075af2 2929 if (iter == sysno)
9eedd27d 2930 return true;
82075af2 2931
9eedd27d 2932 return false;
82075af2
JS
2933}
2934
d16f3f6c
TBA
/* Core of the wait target op: wait for one ptrace event from PTID (or
   from the thread doing a step-over, if one is in flight), decide
   whether it must be reported to GDB, and either fill OURSTATUS with
   the report or resume the inferior and return an "ignore" status.
   Called in a loop from linux_process_target::wait.  */

ptid_t
linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
			      target_wait_flags target_options)
{
  client_state &cs = get_client_state ();
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;
  int in_step_range;
  int any_resumed;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid));
    }

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

  bp_explains_trap = 0;
  trace_event = 0;
  in_step_range = 0;
  ourstatus->set_ignore ();

  auto status_pending_p_any = [&] (thread_info *thread)
    {
      return status_pending_p_callback (thread, minus_one_ptid);
    };

  auto not_stopped = [&] (thread_info *thread)
    {
      return not_stopped_callback (thread, minus_one_ptid);
    };

  /* Find a resumed LWP, if any.  */
  if (find_thread (status_pending_p_any) != NULL)
    any_resumed = 1;
  else if (find_thread (not_stopped) != NULL)
    any_resumed = 1;
  else
    any_resumed = 0;

  if (step_over_bkpt == null_ptid)
    pid = wait_for_event (ptid, &w, options);
  else
    {
      /* While a step-over is pending, only events from the stepping
	 thread are of interest; wait for it synchronously.  */
      if (debug_threads)
	debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
		      target_pid_to_str (step_over_bkpt));
      pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0 || (pid == -1 && !any_resumed))
    {
      gdb_assert (target_options & TARGET_WNOHANG);

      if (debug_threads)
	{
	  debug_printf ("wait_1 ret = null_ptid, "
			"TARGET_WAITKIND_IGNORE\n");
	  debug_exit ();
	}

      ourstatus->set_ignore ();
      return null_ptid;
    }
  else if (pid == -1)
    {
      if (debug_threads)
	{
	  debug_printf ("wait_1 ret = null_ptid, "
			"TARGET_WAITKIND_NO_RESUMED\n");
	  debug_exit ();
	}

      ourstatus->set_no_resumed ();
      return null_ptid;
    }

  event_child = get_thread_lwp (current_thread);

  /* wait_for_event only returns an exit status for the last
     child of a process.  Report it.  */
  if (WIFEXITED (w) || WIFSIGNALED (w))
    {
      if (WIFEXITED (w))
	{
	  ourstatus->set_exited (WEXITSTATUS (w));

	  if (debug_threads)
	    {
	      debug_printf ("wait_1 ret = %s, exited with "
			    "retcode %d\n",
			    target_pid_to_str (ptid_of (current_thread)),
			    WEXITSTATUS (w));
	      debug_exit ();
	    }
	}
      else
	{
	  ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));

	  if (debug_threads)
	    {
	      debug_printf ("wait_1 ret = %s, terminated with "
			    "signal %d\n",
			    target_pid_to_str (ptid_of (current_thread)),
			    WTERMSIG (w));
	      debug_exit ();
	    }
	}

      if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
	return filter_exit_event (event_child, ourstatus);

      return ptid_of (current_thread);
    }

  /* If step-over executes a breakpoint instruction, in the case of a
     hardware single step it means a gdb/gdbserver breakpoint had been
     planted on top of a permanent breakpoint, in the case of a software
     single step it may just mean that gdbserver hit the reinsert breakpoint.
     The PC has been adjusted by save_stop_reason to point at
     the breakpoint address.
     So in the case of the hardware single step advance the PC manually
     past the breakpoint and in the case of software single step advance only
     if it's not the single_step_breakpoint we are hitting.
     This avoids that a program would keep trapping a permanent breakpoint
     forever.  */
  if (step_over_bkpt != null_ptid
      && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && (event_child->stepping
	  || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
    {
      int increment_pc = 0;
      int breakpoint_kind = 0;
      CORE_ADDR stop_pc = event_child->stop_pc;

      breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
      sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);

      if (debug_threads)
	{
	  debug_printf ("step-over for %s executed software breakpoint\n",
			target_pid_to_str (ptid_of (current_thread)));
	}

      if (increment_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);

	  event_child->stop_pc += increment_pc;
	  low_set_pc (regcache, event_child->stop_pc);

	  if (!low_breakpoint_at (event_child->stop_pc))
	    event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (low_supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && low_breakpoint_at (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any single-step
	 breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	{
	  if (debug_threads)
	    debug_printf ("Hit a gdbserver breakpoint.\n");
	}
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* If we're collecting a fast tracepoint, finish the collection and
     move out of the jump pad before delivering a signal.  See
     linux_stabilize_threads.  */

  if (WIFSTOPPED (w)
      && WSTOPSIG (w) != SIGTRAP
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      if (debug_threads)
	debug_printf ("Got signal %d for LWP %ld.  Check if we need "
		      "to defer or adjust it.\n",
		      WSTOPSIG (w), lwpid_of (current_thread));

      /* Allow debugging the jump pad itself.  */
      if (current_thread->last_resume_kind != resume_step
	  && maybe_move_out_of_jump_pad (event_child, &w))
	{
	  enqueue_one_deferred_signal (event_child, &w);

	  if (debug_threads)
	    debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
			  WSTOPSIG (w), lwpid_of (current_thread));

	  resume_one_lwp (event_child, 0, 0, NULL);

	  if (debug_threads)
	    debug_exit ();
	  return ignore_event (ourstatus);
	}
    }

  if (event_child->collecting_fast_tracepoint
      != fast_tpoint_collect_result::not_collecting)
    {
      if (debug_threads)
	debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
		      "Check if we're already there.\n",
		      lwpid_of (current_thread),
		      (int) event_child->collecting_fast_tracepoint);

      trace_event = 1;

      event_child->collecting_fast_tracepoint
	= linux_fast_tracepoint_collecting (event_child, NULL);

      if (event_child->collecting_fast_tracepoint
	  != fast_tpoint_collect_result::before_insn)
	{
	  /* No longer need this breakpoint.  */
	  if (event_child->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("No longer need exit-jump-pad bkpt; removing it."
			      "stopping all threads momentarily.\n");

	      /* Other running threads could hit this breakpoint.
		 We don't handle moribund locations like GDB does,
		 instead we always pause all threads when removing
		 breakpoints, so that any step-over or
		 decr_pc_after_break adjustment is always taken
		 care of while the breakpoint is still
		 inserted.  */
	      stop_all_lwps (1, event_child);

	      delete_breakpoint (event_child->exit_jump_pad_bkpt);
	      event_child->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, event_child);

	      gdb_assert (event_child->suspended >= 0);
	    }
	}

      if (event_child->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting)
	{
	  if (debug_threads)
	    debug_printf ("fast tracepoint finished "
			  "collecting successfully.\n");

	  /* We may have a deferred signal to report.  */
	  if (dequeue_one_deferred_signal (event_child, &w))
	    {
	      if (debug_threads)
		debug_printf ("dequeued one signal.\n");
	    }
	  else
	    {
	      if (debug_threads)
		debug_printf ("no deferred signals.\n");

	      if (stabilizing_threads)
		{
		  ourstatus->set_stopped (GDB_SIGNAL_0);

		  if (debug_threads)
		    {
		      debug_printf ("wait_1 ret = %s, stopped "
				    "while stabilizing threads\n",
				    target_pid_to_str (ptid_of (current_thread)));
		      debug_exit ();
		    }

		  return ptid_of (current_thread);
		}
	    }
	}
    }

  /* Check whether GDB would be interested in this event.  */

  /* Check if GDB is interested in this syscall.  */
  if (WIFSTOPPED (w)
      && WSTOPSIG (w) == SYSCALL_SIGTRAP
      && !gdb_catch_this_syscall (event_child))
    {
      if (debug_threads)
	{
	  debug_printf ("Ignored syscall for LWP %ld.\n",
			lwpid_of (current_thread));
	}

      resume_one_lwp (event_child, event_child->stepping, 0, NULL);

      if (debug_threads)
	debug_exit ();
      return ignore_event (ourstatus);
    }

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler.  Also never ignore signals that could be caused by a
     breakpoint.  */
  if (WIFSTOPPED (w)
      && current_thread->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
	  (current_process ()->priv->thread_db != NULL
	   && (WSTOPSIG (w) == __SIGRTMIN
	       || WSTOPSIG (w) == __SIGRTMIN + 1))
	  ||
#endif
	  (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
	   && !(WSTOPSIG (w) == SIGSTOP
		&& current_thread->last_resume_kind == resume_stop)
	   && !linux_wstatus_maybe_breakpoint (w))))
    {
      siginfo_t info, *info_p;

      if (debug_threads)
	debug_printf ("Ignored signal %d for LWP %ld.\n",
		      WSTOPSIG (w), lwpid_of (current_thread));

      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
		  (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      if (step_over_finished)
	{
	  /* We cancelled this thread's step-over above.  We still
	     need to unsuspend all other LWPs, and set them back
	     running again while the signal handler runs.  */
	  unsuspend_all_lwps (event_child);

	  /* Enqueue the pending signal info so that proceed_all_lwps
	     doesn't lose it.  */
	  enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);

	  proceed_all_lwps ();
	}
      else
	{
	  resume_one_lwp (event_child, event_child->stepping,
			  WSTOPSIG (w), info_p);
	}

      if (debug_threads)
	debug_exit ();

      return ignore_event (ourstatus);
    }

  /* Note that all addresses are always "out of the step range" when
     there's no range to begin with.  */
  in_step_range = lwp_in_step_range (event_child);

  /* If GDB wanted this thread to single step, and the thread is out
     of the step range, we always want to report the SIGTRAP, and let
     GDB handle it.  Watchpoints should always be reported.  So should
     signals we can't explain.  A SIGTRAP we can't explain could be a
     GDB breakpoint --- we may or not support Z0 breakpoints.  If we
     do, we'll be able to handle GDB breakpoints on top of internal
     breakpoints, by handling the internal breakpoint and still
     reporting the event to GDB.  If we don't, we're out of luck, GDB
     won't see the breakpoint hit.  If we see a single-step event but
     the thread should be continuing, don't pass the trap to gdb.
     That indicates that we had previously finished a single-step but
     left the single-step pending -- see
     complete_ongoing_step_over.  */
  report_to_gdb = (!maybe_internal_trap
		   || (current_thread->last_resume_kind == resume_step
		       && !in_step_range)
		   || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
		   || (!in_step_range
		       && !bp_explains_trap
		       && !trace_event
		       && !step_over_finished
		       && !(current_thread->last_resume_kind == resume_continue
			    && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
		   || (gdb_breakpoint_here (event_child->stop_pc)
		       && gdb_condition_true_at_breakpoint (event_child->stop_pc)
		       && gdb_no_commands_at_breakpoint (event_child->stop_pc))
		   || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);

  run_breakpoint_commands (event_child->stop_pc);

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
	{
	  if (bp_explains_trap)
	    debug_printf ("Hit a gdbserver breakpoint.\n");
	  if (step_over_finished)
	    debug_printf ("Step-over finished.\n");
	  if (trace_event)
	    debug_printf ("Tracepoint event.\n");
	  if (lwp_in_step_range (event_child))
	    debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
			  paddress (event_child->stop_pc),
			  paddress (event_child->step_range_start),
			  paddress (event_child->step_range_end));
	}

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (low_supports_breakpoints ())
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc);
	}

      if (step_over_finished)
	{
	  /* If we have finished stepping over a breakpoint, we've
	     stopped and suspended all LWPs momentarily except the
	     stepping one.  This is where we resume them all again.
	     We're going to keep waiting, so use proceed, which
	     handles stepping over the next breakpoint.  */
	  unsuspend_all_lwps (event_child);
	}
      else
	{
	  /* Remove the single-step breakpoints if any.  Note that
	     there isn't single-step breakpoint if we finished stepping
	     over.  */
	  if (supports_software_single_step ()
	      && has_single_step_breakpoints (current_thread))
	    {
	      stop_all_lwps (0, event_child);
	      delete_single_step_breakpoints (current_thread);
	      unstop_all_lwps (0, event_child);
	    }
	}

      if (debug_threads)
	debug_printf ("proceeding all threads.\n");
      proceed_all_lwps ();

      if (debug_threads)
	debug_exit ();

      return ignore_event (ourstatus);
    }

  if (debug_threads)
    {
      if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
	{
	  std::string str
	    = target_waitstatus_to_string (&event_child->waitstatus);

	  debug_printf ("LWP %ld: extended event with waitstatus %s\n",
			lwpid_of (get_lwp_thread (event_child)), str.c_str ());
	}
      if (current_thread->last_resume_kind == resume_step)
	{
	  if (event_child->step_range_start == event_child->step_range_end)
	    debug_printf ("GDB wanted to single-step, reporting event.\n");
	  else if (!lwp_in_step_range (event_child))
	    debug_printf ("Out of step range, reporting event.\n");
	}
      if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
	debug_printf ("Stopped by watchpoint.\n");
      else if (gdb_breakpoint_here (event_child->stop_pc))
	debug_printf ("Stopped by GDB breakpoint.\n");
      if (debug_threads)
	debug_printf ("Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  /* Remove single-step breakpoints.  */
  if (supports_software_single_step ())
    {
      /* Remove single-step breakpoints or not.  If it is true, stop all
	 lwps, so that other threads won't hit the breakpoint in the
	 staled memory.  */
      int remove_single_step_breakpoints_p = 0;

      if (non_stop)
	{
	  remove_single_step_breakpoints_p
	    = has_single_step_breakpoints (current_thread);
	}
      else
	{
	  /* In all-stop, a stop reply cancels all previous resume
	     requests.  Delete all single-step breakpoints.  */

	  find_thread ([&] (thread_info *thread) {
	    if (has_single_step_breakpoints (thread))
	      {
		remove_single_step_breakpoints_p = 1;
		return true;
	      }

	    return false;
	  });
	}

      if (remove_single_step_breakpoints_p)
	{
	  /* If we remove single-step breakpoints from memory, stop all lwps,
	     so that other threads won't hit the breakpoint in the staled
	     memory.  */
	  stop_all_lwps (0, event_child);

	  if (non_stop)
	    {
	      gdb_assert (has_single_step_breakpoints (current_thread));
	      delete_single_step_breakpoints (current_thread);
	    }
	  else
	    {
	      for_each_thread ([] (thread_info *thread){
		if (has_single_step_breakpoints (thread))
		  delete_single_step_breakpoints (thread);
	      });
	    }

	  unstop_all_lwps (0, event_child);
	}
    }

  if (!stabilizing_threads)
    {
      /* In all-stop, stop all threads.  */
      if (!non_stop)
	stop_all_lwps (0, NULL);

      if (step_over_finished)
	{
	  if (!non_stop)
	    {
	      /* If we were doing a step-over, all other threads but
		 the stepping one had been paused in start_step_over,
		 with their suspend counts incremented.  We don't want
		 to do a full unstop/unpause, because we're in
		 all-stop mode (so we want threads stopped), but we
		 still need to unsuspend the other threads, to
		 decrement their `suspended' count back.  */
	      unsuspend_all_lwps (event_child);
	    }
	  else
	    {
	      /* If we just finished a step-over, then all threads had
		 been momentarily paused.  In all-stop, that's fine,
		 we want threads stopped by now anyway.  In non-stop,
		 we need to re-resume threads that GDB wanted to be
		 running.  */
	      unstop_all_lwps (1, event_child);
	    }
	}

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid == minus_one_ptid)
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  /* current_thread and event_child must stay in sync.  */
	  current_thread = get_lwp_thread (event_child);

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}


      /* Stabilize threads (move out of jump pads).  */
      if (!non_stop)
	target_stabilize_threads ();
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (1, event_child);
    }

  if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    {
      /* If the reported event is an exit, fork, vfork or exec, let
	 GDB know.  */

      /* Break the unreported fork relationship chain.  */
      if (event_child->waitstatus.kind () == TARGET_WAITKIND_FORKED
	  || event_child->waitstatus.kind () == TARGET_WAITKIND_VFORKED)
	{
	  event_child->fork_relative->fork_relative = NULL;
	  event_child->fork_relative = NULL;
	}

      *ourstatus = event_child->waitstatus;
      /* Clear the event lwp's waitstatus since we handled it already.  */
      event_child->waitstatus.set_ignore ();
    }
  else
    {
      /* The actual stop signal is overwritten below.  */
      ourstatus->set_stopped (GDB_SIGNAL_0);
    }

  /* Now that we've selected our final event LWP, un-adjust its PC if
     it was a software breakpoint, and the client doesn't know we can
     adjust the breakpoint ourselves.  */
  if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && !cs.swbreak_feature)
    {
      int decr_pc = low_decr_pc_after_break ();

      if (decr_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc + decr_pc);
	}
    }

  if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
    {
      int syscall_number;

      get_syscall_trapinfo (event_child, &syscall_number);
      if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
	ourstatus->set_syscall_entry (syscall_number);
      else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
	ourstatus->set_syscall_return (syscall_number);
      else
	gdb_assert_not_reached ("unexpected syscall state");
    }
  else if (current_thread->last_resume_kind == resume_stop
	   && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->set_stopped (GDB_SIGNAL_0);
    }
  else if (current_thread->last_resume_kind == resume_stop
	   && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons.  */
      ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
    }
  else if (ourstatus->kind () == TARGET_WAITKIND_STOPPED)
    ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));

  gdb_assert (step_over_bkpt == null_ptid);

  if (debug_threads)
    {
      debug_printf ("wait_1 ret = %s, %d, %d\n",
		    target_pid_to_str (ptid_of (current_thread)),
		    ourstatus->kind (), ourstatus->sig ());
      debug_exit ();
    }

  if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
    return filter_exit_event (event_child, ourstatus);

  return ptid_of (current_thread);
}
3671
3672/* Get rid of any pending event in the pipe. */
3673static void
3674async_file_flush (void)
3675{
3676 int ret;
3677 char buf;
3678
3679 do
3680 ret = read (linux_event_pipe[0], &buf, 1);
3681 while (ret >= 0 || (ret == -1 && errno == EINTR));
3682}
3683
3684/* Put something in the pipe, so the event loop wakes up. */
3685static void
3686async_file_mark (void)
3687{
3688 int ret;
3689
3690 async_file_flush ();
3691
3692 do
3693 ret = write (linux_event_pipe[1], "+", 1);
3694 while (ret == 0 || (ret == -1 && errno == EINTR));
3695
3696 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3697 be awakened anyway. */
3698}
3699
6532e7e3
TBA
3700ptid_t
3701linux_process_target::wait (ptid_t ptid,
3702 target_waitstatus *ourstatus,
b60cea74 3703 target_wait_flags target_options)
bd99dc85 3704{
95954743 3705 ptid_t event_ptid;
bd99dc85 3706
bd99dc85
PA
3707 /* Flush the async file first. */
3708 if (target_is_async_p ())
3709 async_file_flush ();
3710
582511be
PA
3711 do
3712 {
d16f3f6c 3713 event_ptid = wait_1 (ptid, ourstatus, target_options);
582511be
PA
3714 }
3715 while ((target_options & TARGET_WNOHANG) == 0
d7e15655 3716 && event_ptid == null_ptid
183be222 3717 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3718
3719 /* If at least one stop was reported, there may be more. A single
3720 SIGCHLD can signal more than one child stop. */
3721 if (target_is_async_p ()
3722 && (target_options & TARGET_WNOHANG) != 0
d7e15655 3723 && event_ptid != null_ptid)
bd99dc85
PA
3724 async_file_mark ();
3725
3726 return event_ptid;
da6d8c04
DJ
3727}
3728
c5f62d5f 3729/* Send a signal to an LWP. */
fd500816
DJ
3730
3731static int
a1928bad 3732kill_lwp (unsigned long lwpid, int signo)
fd500816 3733{
4a6ed09b 3734 int ret;
fd500816 3735
4a6ed09b
PA
3736 errno = 0;
3737 ret = syscall (__NR_tkill, lwpid, signo);
3738 if (errno == ENOSYS)
3739 {
3740 /* If tkill fails, then we are not using nptl threads, a
3741 configuration we no longer support. */
3742 perror_with_name (("tkill"));
3743 }
3744 return ret;
fd500816
DJ
3745}
3746
964e4306
PA
3747void
3748linux_stop_lwp (struct lwp_info *lwp)
3749{
3750 send_sigstop (lwp);
3751}
3752
0d62e5e8 3753static void
02fc4de7 3754send_sigstop (struct lwp_info *lwp)
0d62e5e8 3755{
bd99dc85 3756 int pid;
0d62e5e8 3757
d86d4aaf 3758 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3759
0d62e5e8
DJ
3760 /* If we already have a pending stop signal for this process, don't
3761 send another. */
54a0b537 3762 if (lwp->stop_expected)
0d62e5e8 3763 {
ae13219e 3764 if (debug_threads)
87ce2a04 3765 debug_printf ("Have pending sigstop for lwp %d\n", pid);
ae13219e 3766
0d62e5e8
DJ
3767 return;
3768 }
3769
3770 if (debug_threads)
87ce2a04 3771 debug_printf ("Sending sigstop to lwp %d\n", pid);
0d62e5e8 3772
d50171e4 3773 lwp->stop_expected = 1;
bd99dc85 3774 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3775}
3776
df3e4dbe
SM
3777static void
3778send_sigstop (thread_info *thread, lwp_info *except)
02fc4de7 3779{
d86d4aaf 3780 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3781
7984d532
PA
3782 /* Ignore EXCEPT. */
3783 if (lwp == except)
df3e4dbe 3784 return;
7984d532 3785
02fc4de7 3786 if (lwp->stopped)
df3e4dbe 3787 return;
02fc4de7
PA
3788
3789 send_sigstop (lwp);
7984d532
PA
3790}
3791
3792/* Increment the suspend count of an LWP, and stop it, if not stopped
3793 yet. */
df3e4dbe
SM
3794static void
3795suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
7984d532 3796{
d86d4aaf 3797 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3798
3799 /* Ignore EXCEPT. */
3800 if (lwp == except)
df3e4dbe 3801 return;
7984d532 3802
863d01bd 3803 lwp_suspended_inc (lwp);
7984d532 3804
df3e4dbe 3805 send_sigstop (thread, except);
02fc4de7
PA
3806}
3807
95954743
PA
3808static void
3809mark_lwp_dead (struct lwp_info *lwp, int wstat)
3810{
95954743
PA
3811 /* Store the exit status for later. */
3812 lwp->status_pending_p = 1;
3813 lwp->status_pending = wstat;
3814
00db26fa
PA
3815 /* Store in waitstatus as well, as there's nothing else to process
3816 for this event. */
3817 if (WIFEXITED (wstat))
183be222 3818 lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
00db26fa 3819 else if (WIFSIGNALED (wstat))
183be222 3820 lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
00db26fa 3821
95954743
PA
3822 /* Prevent trying to stop it. */
3823 lwp->stopped = 1;
3824
3825 /* No further stops are expected from a dead lwp. */
3826 lwp->stop_expected = 0;
3827}
3828
00db26fa
PA
3829/* Return true if LWP has exited already, and has a pending exit event
3830 to report to GDB. */
3831
3832static int
3833lwp_is_marked_dead (struct lwp_info *lwp)
3834{
3835 return (lwp->status_pending_p
3836 && (WIFEXITED (lwp->status_pending)
3837 || WIFSIGNALED (lwp->status_pending)));
3838}
3839
d16f3f6c
TBA
3840void
3841linux_process_target::wait_for_sigstop ()
0d62e5e8 3842{
0bfdf32f 3843 struct thread_info *saved_thread;
95954743 3844 ptid_t saved_tid;
fa96cb38
PA
3845 int wstat;
3846 int ret;
0d62e5e8 3847
0bfdf32f
GB
3848 saved_thread = current_thread;
3849 if (saved_thread != NULL)
9c80ecd6 3850 saved_tid = saved_thread->id;
bd99dc85 3851 else
95954743 3852 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 3853
d50171e4 3854 if (debug_threads)
fa96cb38 3855 debug_printf ("wait_for_sigstop: pulling events\n");
d50171e4 3856
fa96cb38
PA
3857 /* Passing NULL_PTID as filter indicates we want all events to be
3858 left pending. Eventually this returns when there are no
3859 unwaited-for children left. */
d16f3f6c 3860 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
fa96cb38 3861 gdb_assert (ret == -1);
0d62e5e8 3862
13d3d99b 3863 if (saved_thread == NULL || mythread_alive (saved_tid))
0bfdf32f 3864 current_thread = saved_thread;
0d62e5e8
DJ
3865 else
3866 {
3867 if (debug_threads)
87ce2a04 3868 debug_printf ("Previously current thread died.\n");
0d62e5e8 3869
f0db101d
PA
3870 /* We can't change the current inferior behind GDB's back,
3871 otherwise, a subsequent command may apply to the wrong
3872 process. */
3873 current_thread = NULL;
0d62e5e8
DJ
3874 }
3875}
3876
13e567af
TBA
3877bool
3878linux_process_target::stuck_in_jump_pad (thread_info *thread)
fa593d66 3879{
d86d4aaf 3880 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3881
863d01bd
PA
3882 if (lwp->suspended != 0)
3883 {
3884 internal_error (__FILE__, __LINE__,
3885 "LWP %ld is suspended, suspended=%d\n",
3886 lwpid_of (thread), lwp->suspended);
3887 }
fa593d66
PA
3888 gdb_assert (lwp->stopped);
3889
3890 /* Allow debugging the jump pad, gdb_collect, etc.. */
3891 return (supports_fast_tracepoints ()
58b4daa5 3892 && agent_loaded_p ()
fa593d66 3893 && (gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3894 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
fa593d66 3895 || thread->last_resume_kind == resume_step)
229d26fc
SM
3896 && (linux_fast_tracepoint_collecting (lwp, NULL)
3897 != fast_tpoint_collect_result::not_collecting));
fa593d66
PA
3898}
3899
d16f3f6c
TBA
3900void
3901linux_process_target::move_out_of_jump_pad (thread_info *thread)
fa593d66 3902{
f0ce0d3a 3903 struct thread_info *saved_thread;
d86d4aaf 3904 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3905 int *wstat;
3906
863d01bd
PA
3907 if (lwp->suspended != 0)
3908 {
3909 internal_error (__FILE__, __LINE__,
3910 "LWP %ld is suspended, suspended=%d\n",
3911 lwpid_of (thread), lwp->suspended);
3912 }
fa593d66
PA
3913 gdb_assert (lwp->stopped);
3914
f0ce0d3a
PA
3915 /* For gdb_breakpoint_here. */
3916 saved_thread = current_thread;
3917 current_thread = thread;
3918
fa593d66
PA
3919 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3920
3921 /* Allow debugging the jump pad, gdb_collect, etc. */
3922 if (!gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3923 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
3924 && thread->last_resume_kind != resume_step
3925 && maybe_move_out_of_jump_pad (lwp, wstat))
3926 {
3927 if (debug_threads)
87ce2a04 3928 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
d86d4aaf 3929 lwpid_of (thread));
fa593d66
PA
3930
3931 if (wstat)
3932 {
3933 lwp->status_pending_p = 0;
3934 enqueue_one_deferred_signal (lwp, wstat);
3935
3936 if (debug_threads)
87ce2a04
DE
3937 debug_printf ("Signal %d for LWP %ld deferred "
3938 "(in jump pad)\n",
d86d4aaf 3939 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
3940 }
3941
df95181f 3942 resume_one_lwp (lwp, 0, 0, NULL);
fa593d66
PA
3943 }
3944 else
863d01bd 3945 lwp_suspended_inc (lwp);
f0ce0d3a
PA
3946
3947 current_thread = saved_thread;
fa593d66
PA
3948}
3949
5a6b0a41
SM
3950static bool
3951lwp_running (thread_info *thread)
fa593d66 3952{
d86d4aaf 3953 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3954
00db26fa 3955 if (lwp_is_marked_dead (lwp))
5a6b0a41
SM
3956 return false;
3957
3958 return !lwp->stopped;
fa593d66
PA
3959}
3960
d16f3f6c
TBA
3961void
3962linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
0d62e5e8 3963{
bde24c0a
PA
3964 /* Should not be called recursively. */
3965 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3966
87ce2a04
DE
3967 if (debug_threads)
3968 {
3969 debug_enter ();
3970 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3971 suspend ? "stop-and-suspend" : "stop",
3972 except != NULL
d86d4aaf 3973 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
87ce2a04
DE
3974 : "none");
3975 }
3976
bde24c0a
PA
3977 stopping_threads = (suspend
3978 ? STOPPING_AND_SUSPENDING_THREADS
3979 : STOPPING_THREADS);
7984d532
PA
3980
3981 if (suspend)
df3e4dbe
SM
3982 for_each_thread ([&] (thread_info *thread)
3983 {
3984 suspend_and_send_sigstop (thread, except);
3985 });
7984d532 3986 else
df3e4dbe
SM
3987 for_each_thread ([&] (thread_info *thread)
3988 {
3989 send_sigstop (thread, except);
3990 });
3991
fa96cb38 3992 wait_for_sigstop ();
bde24c0a 3993 stopping_threads = NOT_STOPPING_THREADS;
87ce2a04
DE
3994
3995 if (debug_threads)
3996 {
3997 debug_printf ("stop_all_lwps done, setting stopping_threads "
3998 "back to !stopping\n");
3999 debug_exit ();
4000 }
0d62e5e8
DJ
4001}
4002
863d01bd
PA
4003/* Enqueue one signal in the chain of signals which need to be
4004 delivered to this process on next resume. */
4005
4006static void
4007enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4008{
013e3554
TBA
4009 lwp->pending_signals.emplace_back (signal);
4010 if (info == nullptr)
4011 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
863d01bd 4012 else
013e3554 4013 lwp->pending_signals.back ().info = *info;
863d01bd
PA
4014}
4015
df95181f
TBA
4016void
4017linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
fa5308bd 4018{
984a2c04
YQ
4019 struct thread_info *thread = get_lwp_thread (lwp);
4020 struct regcache *regcache = get_thread_regcache (thread, 1);
8ce47547
TT
4021
4022 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
984a2c04 4023
984a2c04 4024 current_thread = thread;
7582c77c 4025 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
fa5308bd 4026
a0ff9e1a 4027 for (CORE_ADDR pc : next_pcs)
3b9a79ef 4028 set_single_step_breakpoint (pc, current_ptid);
fa5308bd
AT
4029}
4030
df95181f
TBA
4031int
4032linux_process_target::single_step (lwp_info* lwp)
7fe5e27e
AT
4033{
4034 int step = 0;
4035
b31cdfa6 4036 if (supports_hardware_single_step ())
7fe5e27e
AT
4037 {
4038 step = 1;
4039 }
7582c77c 4040 else if (supports_software_single_step ())
7fe5e27e
AT
4041 {
4042 install_software_single_step_breakpoints (lwp);
4043 step = 0;
4044 }
4045 else
4046 {
4047 if (debug_threads)
4048 debug_printf ("stepping is not implemented on this target");
4049 }
4050
4051 return step;
4052}
4053
35ac8b3e 4054/* The signal can be delivered to the inferior if we are not trying to
5b061e98
YQ
4055 finish a fast tracepoint collect. Since signal can be delivered in
4056 the step-over, the program may go to signal handler and trap again
4057 after return from the signal handler. We can live with the spurious
4058 double traps. */
35ac8b3e
YQ
4059
4060static int
4061lwp_signal_can_be_delivered (struct lwp_info *lwp)
4062{
229d26fc
SM
4063 return (lwp->collecting_fast_tracepoint
4064 == fast_tpoint_collect_result::not_collecting);
35ac8b3e
YQ
4065}
4066
df95181f
TBA
4067void
4068linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
4069 int signal, siginfo_t *info)
da6d8c04 4070{
d86d4aaf 4071 struct thread_info *thread = get_lwp_thread (lwp);
0bfdf32f 4072 struct thread_info *saved_thread;
82075af2 4073 int ptrace_request;
c06cbd92
YQ
4074 struct process_info *proc = get_thread_process (thread);
4075
4076 /* Note that target description may not be initialised
4077 (proc->tdesc == NULL) at this point because the program hasn't
4078 stopped at the first instruction yet. It means GDBserver skips
4079 the extra traps from the wrapper program (see option --wrapper).
4080 Code in this function that requires register access should be
4081 guarded by proc->tdesc == NULL or something else. */
0d62e5e8 4082
54a0b537 4083 if (lwp->stopped == 0)
0d62e5e8
DJ
4084 return;
4085
183be222 4086 gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
65706a29 4087
229d26fc
SM
4088 fast_tpoint_collect_result fast_tp_collecting
4089 = lwp->collecting_fast_tracepoint;
fa593d66 4090
229d26fc
SM
4091 gdb_assert (!stabilizing_threads
4092 || (fast_tp_collecting
4093 != fast_tpoint_collect_result::not_collecting));
fa593d66 4094
219f2f23
PA
4095 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4096 user used the "jump" command, or "set $pc = foo"). */
c06cbd92 4097 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
219f2f23
PA
4098 {
4099 /* Collecting 'while-stepping' actions doesn't make sense
4100 anymore. */
d86d4aaf 4101 release_while_stepping_state_list (thread);
219f2f23
PA
4102 }
4103
0d62e5e8 4104 /* If we have pending signals or status, and a new signal, enqueue the
35ac8b3e
YQ
4105 signal. Also enqueue the signal if it can't be delivered to the
4106 inferior right now. */
0d62e5e8 4107 if (signal != 0
fa593d66 4108 && (lwp->status_pending_p
013e3554 4109 || !lwp->pending_signals.empty ()
35ac8b3e 4110 || !lwp_signal_can_be_delivered (lwp)))
94610ec4
YQ
4111 {
4112 enqueue_pending_signal (lwp, signal, info);
4113
4114 /* Postpone any pending signal. It was enqueued above. */
4115 signal = 0;
4116 }
0d62e5e8 4117
d50171e4
PA
4118 if (lwp->status_pending_p)
4119 {
4120 if (debug_threads)
94610ec4 4121 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
87ce2a04 4122 " has pending status\n",
94610ec4 4123 lwpid_of (thread), step ? "step" : "continue",
87ce2a04 4124 lwp->stop_expected ? "expected" : "not expected");
d50171e4
PA
4125 return;
4126 }
0d62e5e8 4127
0bfdf32f
GB
4128 saved_thread = current_thread;
4129 current_thread = thread;
0d62e5e8 4130
0d62e5e8
DJ
4131 /* This bit needs some thinking about. If we get a signal that
4132 we must report while a single-step reinsert is still pending,
4133 we often end up resuming the thread. It might be better to
4134 (ew) allow a stack of pending events; then we could be sure that
4135 the reinsert happened right away and not lose any signals.
4136
4137 Making this stack would also shrink the window in which breakpoints are
54a0b537 4138 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
4139 complete correctness, so it won't solve that problem. It may be
4140 worthwhile just to solve this one, however. */
54a0b537 4141 if (lwp->bp_reinsert != 0)
0d62e5e8
DJ
4142 {
4143 if (debug_threads)
87ce2a04
DE
4144 debug_printf (" pending reinsert at 0x%s\n",
4145 paddress (lwp->bp_reinsert));
d50171e4 4146
b31cdfa6 4147 if (supports_hardware_single_step ())
d50171e4 4148 {
229d26fc 4149 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
fa593d66
PA
4150 {
4151 if (step == 0)
9986ba08 4152 warning ("BAD - reinserting but not stepping.");
fa593d66 4153 if (lwp->suspended)
9986ba08
PA
4154 warning ("BAD - reinserting and suspended(%d).",
4155 lwp->suspended);
fa593d66 4156 }
d50171e4 4157 }
f79b145d
YQ
4158
4159 step = maybe_hw_step (thread);
0d62e5e8
DJ
4160 }
4161
229d26fc 4162 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
fa593d66
PA
4163 {
4164 if (debug_threads)
87ce2a04
DE
4165 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4166 " (exit-jump-pad-bkpt)\n",
d86d4aaf 4167 lwpid_of (thread));
fa593d66 4168 }
229d26fc 4169 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
fa593d66
PA
4170 {
4171 if (debug_threads)
87ce2a04
DE
4172 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4173 " single-stepping\n",
d86d4aaf 4174 lwpid_of (thread));
fa593d66 4175
b31cdfa6 4176 if (supports_hardware_single_step ())
fa593d66
PA
4177 step = 1;
4178 else
38e08fca
GB
4179 {
4180 internal_error (__FILE__, __LINE__,
4181 "moving out of jump pad single-stepping"
4182 " not implemented on this target");
4183 }
fa593d66
PA
4184 }
4185
219f2f23
PA
4186 /* If we have while-stepping actions in this thread set it stepping.
4187 If we have a signal to deliver, it may or may not be set to
4188 SIG_IGN, we don't know. Assume so, and allow collecting
4189 while-stepping into a signal handler. A possible smart thing to
4190 do would be to set an internal breakpoint at the signal return
4191 address, continue, and carry on catching this while-stepping
4192 action only when that breakpoint is hit. A future
4193 enhancement. */
7fe5e27e 4194 if (thread->while_stepping != NULL)
219f2f23
PA
4195 {
4196 if (debug_threads)
87ce2a04 4197 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
d86d4aaf 4198 lwpid_of (thread));
7fe5e27e
AT
4199
4200 step = single_step (lwp);
219f2f23
PA
4201 }
4202
bf9ae9d8 4203 if (proc->tdesc != NULL && low_supports_breakpoints ())
0d62e5e8 4204 {
0bfdf32f 4205 struct regcache *regcache = get_thread_regcache (current_thread, 1);
582511be 4206
bf9ae9d8 4207 lwp->stop_pc = low_get_pc (regcache);
582511be
PA
4208
4209 if (debug_threads)
4210 {
4211 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4212 (long) lwp->stop_pc);
4213 }
0d62e5e8
DJ
4214 }
4215
35ac8b3e
YQ
4216 /* If we have pending signals, consume one if it can be delivered to
4217 the inferior. */
013e3554 4218 if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
0d62e5e8 4219 {
013e3554 4220 const pending_signal &p_sig = lwp->pending_signals.front ();
0d62e5e8 4221
013e3554
TBA
4222 signal = p_sig.signal;
4223 if (p_sig.info.si_signo != 0)
d86d4aaf 4224 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
013e3554 4225 &p_sig.info);
32ca6d61 4226
013e3554 4227 lwp->pending_signals.pop_front ();
0d62e5e8
DJ
4228 }
4229
94610ec4
YQ
4230 if (debug_threads)
4231 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4232 lwpid_of (thread), step ? "step" : "continue", signal,
4233 lwp->stop_expected ? "expected" : "not expected");
4234
d7599cc0 4235 low_prepare_to_resume (lwp);
aa5ca48f 4236
d86d4aaf 4237 regcache_invalidate_thread (thread);
da6d8c04 4238 errno = 0;
54a0b537 4239 lwp->stepping = step;
82075af2
JS
4240 if (step)
4241 ptrace_request = PTRACE_SINGLESTEP;
4242 else if (gdb_catching_syscalls_p (lwp))
4243 ptrace_request = PTRACE_SYSCALL;
4244 else
4245 ptrace_request = PTRACE_CONT;
4246 ptrace (ptrace_request,
4247 lwpid_of (thread),
b8e1b30e 4248 (PTRACE_TYPE_ARG3) 0,
14ce3065
DE
4249 /* Coerce to a uintptr_t first to avoid potential gcc warning
4250 of coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 4251 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
0d62e5e8 4252
0bfdf32f 4253 current_thread = saved_thread;
da6d8c04 4254 if (errno)
23f238d3
PA
4255 perror_with_name ("resuming thread");
4256
4257 /* Successfully resumed. Clear state that no longer makes sense,
4258 and mark the LWP as running. Must not do this before resuming
4259 otherwise if that fails other code will be confused. E.g., we'd
4260 later try to stop the LWP and hang forever waiting for a stop
4261 status. Note that we must not throw after this is cleared,
4262 otherwise handle_zombie_lwp_error would get confused. */
4263 lwp->stopped = 0;
4264 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4265}
4266
d7599cc0
TBA
4267void
4268linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4269{
4270 /* Nop. */
4271}
4272
23f238d3
PA
4273/* Called when we try to resume a stopped LWP and that errors out. If
4274 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4275 or about to become), discard the error, clear any pending status
4276 the LWP may have, and return true (we'll collect the exit status
4277 soon enough). Otherwise, return false. */
4278
4279static int
4280check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4281{
4282 struct thread_info *thread = get_lwp_thread (lp);
4283
4284 /* If we get an error after resuming the LWP successfully, we'd
4285 confuse !T state for the LWP being gone. */
4286 gdb_assert (lp->stopped);
4287
4288 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4289 because even if ptrace failed with ESRCH, the tracee may be "not
4290 yet fully dead", but already refusing ptrace requests. In that
4291 case the tracee has 'R (Running)' state for a little bit
4292 (observed in Linux 3.18). See also the note on ESRCH in the
4293 ptrace(2) man page. Instead, check whether the LWP has any state
4294 other than ptrace-stopped. */
4295
4296 /* Don't assume anything if /proc/PID/status can't be read. */
4297 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 4298 {
23f238d3
PA
4299 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4300 lp->status_pending_p = 0;
4301 return 1;
4302 }
4303 return 0;
4304}
4305
df95181f
TBA
4306void
4307linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4308 siginfo_t *info)
23f238d3 4309{
a70b8144 4310 try
23f238d3 4311 {
df95181f 4312 resume_one_lwp_throw (lwp, step, signal, info);
23f238d3 4313 }
230d2906 4314 catch (const gdb_exception_error &ex)
23f238d3
PA
4315 {
4316 if (!check_ptrace_stopped_lwp_gone (lwp))
eedc3f4f 4317 throw;
3221518c 4318 }
da6d8c04
DJ
4319}
4320
5fdda392
SM
4321/* This function is called once per thread via for_each_thread.
4322 We look up which resume request applies to THREAD and mark it with a
4323 pointer to the appropriate resume request.
5544ad89
DJ
4324
4325 This algorithm is O(threads * resume elements), but resume elements
4326 is small (and will remain small at least until GDB supports thread
4327 suspension). */
ebcf782c 4328
5fdda392
SM
4329static void
4330linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
0d62e5e8 4331{
d86d4aaf 4332 struct lwp_info *lwp = get_thread_lwp (thread);
64386c31 4333
5fdda392 4334 for (int ndx = 0; ndx < n; ndx++)
95954743 4335 {
5fdda392 4336 ptid_t ptid = resume[ndx].thread;
d7e15655 4337 if (ptid == minus_one_ptid
9c80ecd6 4338 || ptid == thread->id
0c9070b3
YQ
4339 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4340 of PID'. */
e99b03dc 4341 || (ptid.pid () == pid_of (thread)
0e998d96 4342 && (ptid.is_pid ()
e38504b3 4343 || ptid.lwp () == -1)))
95954743 4344 {
5fdda392 4345 if (resume[ndx].kind == resume_stop
8336d594 4346 && thread->last_resume_kind == resume_stop)
d50171e4
PA
4347 {
4348 if (debug_threads)
87ce2a04 4349 debug_printf ("already %s LWP %ld at GDB's request\n",
183be222 4350 (thread->last_status.kind ()
87ce2a04
DE
4351 == TARGET_WAITKIND_STOPPED)
4352 ? "stopped"
4353 : "stopping",
d86d4aaf 4354 lwpid_of (thread));
d50171e4
PA
4355
4356 continue;
4357 }
4358
5a04c4cf
PA
4359 /* Ignore (wildcard) resume requests for already-resumed
4360 threads. */
5fdda392 4361 if (resume[ndx].kind != resume_stop
5a04c4cf
PA
4362 && thread->last_resume_kind != resume_stop)
4363 {
4364 if (debug_threads)
4365 debug_printf ("already %s LWP %ld at GDB's request\n",
4366 (thread->last_resume_kind
4367 == resume_step)
4368 ? "stepping"
4369 : "continuing",
4370 lwpid_of (thread));
4371 continue;
4372 }
4373
4374 /* Don't let wildcard resumes resume fork children that GDB
4375 does not yet know are new fork children. */
4376 if (lwp->fork_relative != NULL)
4377 {
5a04c4cf
PA
4378 struct lwp_info *rel = lwp->fork_relative;
4379
4380 if (rel->status_pending_p
183be222
SM
4381 && (rel->waitstatus.kind () == TARGET_WAITKIND_FORKED
4382 || rel->waitstatus.kind () == TARGET_WAITKIND_VFORKED))
5a04c4cf
PA
4383 {
4384 if (debug_threads)
4385 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4386 lwpid_of (thread));
4387 continue;
4388 }
4389 }
4390
4391 /* If the thread has a pending event that has already been
4392 reported to GDBserver core, but GDB has not pulled the
4393 event out of the vStopped queue yet, likewise, ignore the
4394 (wildcard) resume request. */
9c80ecd6 4395 if (in_queued_stop_replies (thread->id))
5a04c4cf
PA
4396 {
4397 if (debug_threads)
4398 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4399 lwpid_of (thread));
4400 continue;
4401 }
4402
5fdda392 4403 lwp->resume = &resume[ndx];
8336d594 4404 thread->last_resume_kind = lwp->resume->kind;
fa593d66 4405
c2d6af84
PA
4406 lwp->step_range_start = lwp->resume->step_range_start;
4407 lwp->step_range_end = lwp->resume->step_range_end;
4408
fa593d66
PA
4409 /* If we had a deferred signal to report, dequeue one now.
4410 This can happen if LWP gets more than one signal while
4411 trying to get out of a jump pad. */
4412 if (lwp->stopped
4413 && !lwp->status_pending_p
4414 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4415 {
4416 lwp->status_pending_p = 1;
4417
4418 if (debug_threads)
87ce2a04
DE
4419 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4420 "leaving status pending.\n",
d86d4aaf
DE
4421 WSTOPSIG (lwp->status_pending),
4422 lwpid_of (thread));
fa593d66
PA
4423 }
4424
5fdda392 4425 return;
95954743
PA
4426 }
4427 }
2bd7c093
PA
4428
4429 /* No resume action for this thread. */
4430 lwp->resume = NULL;
5544ad89
DJ
4431}
4432
df95181f
TBA
4433bool
4434linux_process_target::resume_status_pending (thread_info *thread)
5544ad89 4435{
d86d4aaf 4436 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4437
bd99dc85
PA
4438 /* LWPs which will not be resumed are not interesting, because
4439 we might not wait for them next time through linux_wait. */
2bd7c093 4440 if (lwp->resume == NULL)
25c28b4d 4441 return false;
64386c31 4442
df95181f 4443 return thread_still_has_status_pending (thread);
d50171e4
PA
4444}
4445
df95181f
TBA
4446bool
4447linux_process_target::thread_needs_step_over (thread_info *thread)
d50171e4 4448{
d86d4aaf 4449 struct lwp_info *lwp = get_thread_lwp (thread);
0bfdf32f 4450 struct thread_info *saved_thread;
d50171e4 4451 CORE_ADDR pc;
c06cbd92
YQ
4452 struct process_info *proc = get_thread_process (thread);
4453
4454 /* GDBserver is skipping the extra traps from the wrapper program,
4455 don't have to do step over. */
4456 if (proc->tdesc == NULL)
eca55aec 4457 return false;
d50171e4
PA
4458
4459 /* LWPs which will not be resumed are not interesting, because we
4460 might not wait for them next time through linux_wait. */
4461
4462 if (!lwp->stopped)
4463 {
4464 if (debug_threads)
87ce2a04 4465 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
d86d4aaf 4466 lwpid_of (thread));
eca55aec 4467 return false;
d50171e4
PA
4468 }
4469
8336d594 4470 if (thread->last_resume_kind == resume_stop)
d50171e4
PA
4471 {
4472 if (debug_threads)
87ce2a04
DE
4473 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4474 " stopped\n",
d86d4aaf 4475 lwpid_of (thread));
eca55aec 4476 return false;
d50171e4
PA
4477 }
4478
7984d532
PA
4479 gdb_assert (lwp->suspended >= 0);
4480
4481 if (lwp->suspended)
4482 {
4483 if (debug_threads)
87ce2a04 4484 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
d86d4aaf 4485 lwpid_of (thread));
eca55aec 4486 return false;
7984d532
PA
4487 }
4488
bd99dc85 4489 if (lwp->status_pending_p)
d50171e4
PA
4490 {
4491 if (debug_threads)
87ce2a04
DE
4492 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4493 " status.\n",
d86d4aaf 4494 lwpid_of (thread));
eca55aec 4495 return false;
d50171e4
PA
4496 }
4497
4498 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4499 or we have. */
4500 pc = get_pc (lwp);
4501
4502 /* If the PC has changed since we stopped, then don't do anything,
4503 and let the breakpoint/tracepoint be hit. This happens if, for
4504 instance, GDB handled the decr_pc_after_break subtraction itself,
4505 GDB is OOL stepping this thread, or the user has issued a "jump"
4506 command, or poked thread's registers herself. */
4507 if (pc != lwp->stop_pc)
4508 {
4509 if (debug_threads)
87ce2a04
DE
4510 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4511 "Old stop_pc was 0x%s, PC is now 0x%s\n",
d86d4aaf
DE
4512 lwpid_of (thread),
4513 paddress (lwp->stop_pc), paddress (pc));
eca55aec 4514 return false;
d50171e4
PA
4515 }
4516
484b3c32
YQ
4517 /* On software single step target, resume the inferior with signal
4518 rather than stepping over. */
7582c77c 4519 if (supports_software_single_step ()
013e3554 4520 && !lwp->pending_signals.empty ()
484b3c32
YQ
4521 && lwp_signal_can_be_delivered (lwp))
4522 {
4523 if (debug_threads)
4524 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4525 " signals.\n",
4526 lwpid_of (thread));
4527
eca55aec 4528 return false;
484b3c32
YQ
4529 }
4530
0bfdf32f
GB
4531 saved_thread = current_thread;
4532 current_thread = thread;
d50171e4 4533
8b07ae33 4534 /* We can only step over breakpoints we know about. */
fa593d66 4535 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
d50171e4 4536 {
8b07ae33 4537 /* Don't step over a breakpoint that GDB expects to hit
9f3a5c85
LM
4538 though. If the condition is being evaluated on the target's side
4539 and it evaluate to false, step over this breakpoint as well. */
4540 if (gdb_breakpoint_here (pc)
d3ce09f5
SS
4541 && gdb_condition_true_at_breakpoint (pc)
4542 && gdb_no_commands_at_breakpoint (pc))
8b07ae33
PA
4543 {
4544 if (debug_threads)
87ce2a04
DE
4545 debug_printf ("Need step over [LWP %ld]? yes, but found"
4546 " GDB breakpoint at 0x%s; skipping step over\n",
d86d4aaf 4547 lwpid_of (thread), paddress (pc));
d50171e4 4548
0bfdf32f 4549 current_thread = saved_thread;
eca55aec 4550 return false;
8b07ae33
PA
4551 }
4552 else
4553 {
4554 if (debug_threads)
87ce2a04
DE
4555 debug_printf ("Need step over [LWP %ld]? yes, "
4556 "found breakpoint at 0x%s\n",
d86d4aaf 4557 lwpid_of (thread), paddress (pc));
d50171e4 4558
8b07ae33 4559 /* We've found an lwp that needs stepping over --- return 1 so
8f86d7aa 4560 that find_thread stops looking. */
0bfdf32f 4561 current_thread = saved_thread;
8b07ae33 4562
eca55aec 4563 return true;
8b07ae33 4564 }
d50171e4
PA
4565 }
4566
0bfdf32f 4567 current_thread = saved_thread;
d50171e4
PA
4568
4569 if (debug_threads)
87ce2a04
DE
4570 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4571 " at 0x%s\n",
d86d4aaf 4572 lwpid_of (thread), paddress (pc));
c6ecbae5 4573
eca55aec 4574 return false;
5544ad89
DJ
4575}
4576
d16f3f6c
TBA
/* Begin a step-over of the breakpoint LWP is stopped at: stop every
   other LWP, lift the breakpoint(s) at LWP's PC so the original
   instruction can execute, single-step the LWP, and record in
   step_over_bkpt which thread's event we are now waiting for.  */

void
linux_process_target::start_step_over (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    debug_printf ("Starting step-over on LWP %ld.  Stopping all threads\n",
		  lwpid_of (thread));

  stop_all_lwps (1, lwp);

  /* The stepped-over LWP itself must not be suspended, or we could
     never resume it below.  */
  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  if (debug_threads)
    debug_printf ("Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  /* Temporarily switch the global current_thread so the breakpoint
     and single-step helpers operate on this LWP.  */
  saved_thread = current_thread;
  current_thread = thread;

  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  step = single_step (lwp);

  current_thread = saved_thread;

  resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->id;
}
4623
b31cdfa6
TBA
/* Finish a previously started step-over: put the breakpoint(s) back
   at LWP->bp_reinsert and clear step_over_bkpt.  Returns true if a
   step-over was in fact in progress for LWP, false otherwise.  */

bool
linux_process_target::finish_step_over (lwp_info *lwp)
{
  if (lwp->bp_reinsert != 0)
    {
      struct thread_info *saved_thread = current_thread;

      if (debug_threads)
	debug_printf ("Finished step over.\n");

      current_thread = get_lwp_thread (lwp);

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
	 may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any single-step breakpoints.  No longer needed.  We
	 don't have to worry about other threads hitting this trap,
	 and later not being able to explain it, because we were
	 stepping over a breakpoint, and we hold all threads but
	 LWP stopped while doing that.  */
      if (!supports_hardware_single_step ())
	{
	  gdb_assert (has_single_step_breakpoints (current_thread));
	  delete_single_step_breakpoints (current_thread);
	}

      step_over_bkpt = null_ptid;
      current_thread = saved_thread;
      return true;
    }
  else
    return false;
}
4661
d16f3f6c
TBA
/* If a step-over is in progress (step_over_bkpt is set), wait for its
   completing event, finish the step-over, and discard the resulting
   SIGTRAP when GDB did not itself ask for a step.  Used on detach so
   we never leave a breakpoint half-removed.  */

void
linux_process_target::complete_ongoing_step_over ()
{
  if (step_over_bkpt != null_ptid)
    {
      struct lwp_info *lwp;
      int wstat;
      int ret;

      if (debug_threads)
	debug_printf ("detach: step over in progress, finish it first\n");

      /* Passing NULL_PTID as filter indicates we want all events to
	 be left pending.  Eventually this returns when there are no
	 unwaited-for children left.  */
      ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
				     __WALL);
      gdb_assert (ret == -1);

      lwp = find_lwp_pid (step_over_bkpt);
      if (lwp != NULL)
	{
	  finish_step_over (lwp);

	  /* If we got our step SIGTRAP, don't leave it pending,
	     otherwise we would report it to GDB as a spurious
	     SIGTRAP.  */
	  gdb_assert (lwp->status_pending_p);
	  if (WIFSTOPPED (lwp->status_pending)
	      && WSTOPSIG (lwp->status_pending) == SIGTRAP)
	    {
	      thread_info *thread = get_lwp_thread (lwp);
	      if (thread->last_resume_kind != resume_step)
		{
		  if (debug_threads)
		    debug_printf ("detach: discard step-over SIGTRAP\n");

		  lwp->status_pending_p = 0;
		  lwp->status_pending = 0;
		  resume_one_lwp (lwp, lwp->stepping, 0, NULL);
		}
	      else
		{
		  if (debug_threads)
		    debug_printf ("detach: resume_step, "
				  "not discarding step-over SIGTRAP\n");
		}
	    }
	}
      step_over_bkpt = null_ptid;
      unsuspend_all_lwps (lwp);
    }
}
4715
df95181f
TBA
/* Act on THREAD's pending lwp->resume request, set by
   linux_set_resume_request.  Stop requests are handled immediately
   (queue a SIGSTOP if needed); resume/step requests queue any
   requested signal and then either proceed the LWP or leave it
   stopped when LEAVE_ALL_STOPPED, it is suspended, or it already has
   a pending status to report.  Clears lwp->resume when done.  */

void
linux_process_target::resume_one_thread (thread_info *thread,
					 bool leave_all_stopped)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int leave_pending;

  /* No resume request for this thread; nothing to do.  */
  if (lwp->resume == NULL)
    return;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    debug_printf ("stopping LWP %ld\n", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("already stopped LWP %ld\n",
			  lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report.empty ())
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.set_ignore ();
      return;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume it - we can just report the pending status.
     Likewise if it is suspended, because e.g., another thread is
     stepping past a breakpoint.  Make sure to queue any signals that
     would otherwise be sent.  In all-stop mode, we do this decision
     based on if *any* thread has a pending status.  If there's a
     thread that needs the step-over-breakpoint dance, then don't
     resume any other thread but that particular one.  */
  leave_pending = (lwp->suspended
		   || lwp->status_pending_p
		   || leave_all_stopped);

  /* If we have a new signal, enqueue the signal.  */
  if (lwp->resume->sig != 0)
    {
      siginfo_t info, *info_p;

      /* If this is the same signal we were previously stopped by,
	 make sure to queue its siginfo.  */
      if (WIFSTOPPED (lwp->last_status)
	  && WSTOPSIG (lwp->last_status) == lwp->resume->sig
	  && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
		     (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
    }

  if (!leave_pending)
    {
      if (debug_threads)
	debug_printf ("resuming LWP %ld\n", lwpid_of (thread));

      proceed_one_lwp (thread, NULL);
    }
  else
    {
      if (debug_threads)
	debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
    }

  thread->last_status.set_ignore ();
  lwp->resume = NULL;
}
4820
0e4d7e35
TBA
/* The linux target's "resume" op: record the N RESUME_INFO requests
   on each thread, then decide globally whether to actually resume.
   In all-stop mode nothing is resumed if any to-be-resumed thread has
   a pending status, or if some thread first needs to step over a
   breakpoint (the step-over runs with all other threads stopped).  */

void
linux_process_target::resume (thread_resume *resume_info, size_t n)
{
  struct thread_info *need_step_over = NULL;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("linux_resume:\n");
    }

  /* Attach the matching resume request (if any) to every thread.  */
  for_each_thread ([&] (thread_info *thread)
    {
      linux_set_resume_request (thread, resume_info, n);
    });

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  bool any_pending = false;
  if (!non_stop)
    any_pending = find_thread ([this] (thread_info *thread)
      {
	return resume_status_pending (thread);
      }) != nullptr;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && low_supports_breakpoints ())
    need_step_over = find_thread ([this] (thread_info *thread)
      {
	return thread_needs_step_over (thread);
      });

  bool leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	debug_printf ("Not resuming all, need step over\n");
      else if (any_pending)
	debug_printf ("Not resuming, all-stop and found "
		      "an LWP with pending status\n");
      else
	debug_printf ("Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  for_each_thread ([&] (thread_info *thread)
    {
      resume_one_thread (thread, leave_all_stopped);
    });

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  if (debug_threads)
    {
      debug_printf ("linux_resume done\n");
      debug_exit ();
    }

  /* We may have events that were pending that can/should be sent to
     the client now.  Trigger a linux_wait call.  */
  if (target_is_async_p ())
    async_file_mark ();
}
4896
df95181f
TBA
/* Set THREAD's LWP running again unless there is a reason to keep it
   stopped: it is EXCEPT, already running, meant to stay stopped by
   the client, has a pending status, or is suspended.  Re-queues a
   SIGSTOP for client-stop requests when needed, and single-steps
   instead of continuing when the client asked for a step or a
   breakpoint reinsert is pending.  */

void
linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  /* EXCEPT is the LWP the caller is handling specially; skip it.  */
  if (lwp == except)
    return;

  if (debug_threads)
    debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld already running\n", lwpid_of (thread));
      return;
    }

  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
	debug_printf ("   client wants LWP to remain %ld stopped\n",
		      lwpid_of (thread));
      return;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld has pending status, leaving stopped\n",
		      lwpid_of (thread));
      return;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld is suspended\n", lwpid_of (thread));
      return;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report.empty ()
      && (lwp->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting))
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      if (debug_threads)
	debug_printf ("Client wants LWP %ld to stop.  "
		      "Making sure it has a SIGSTOP pending\n",
		      lwpid_of (thread));

      send_sigstop (lwp);
    }

  if (thread->last_resume_kind == resume_step)
    {
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, client wants it stepping\n",
		      lwpid_of (thread));

      /* If resume_step is requested by GDB, install single-step
	 breakpoints when the thread is about to be actually resumed if
	 the single-step breakpoints weren't removed.  */
      if (supports_software_single_step ()
	  && !has_single_step_breakpoints (thread))
	install_software_single_step_breakpoints (lwp);

      step = maybe_hw_step (thread);
    }
  else if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, reinsert set\n",
		      lwpid_of (thread));

      step = maybe_hw_step (thread);
    }
  else
    step = 0;

  resume_one_lwp (lwp, step, 0, NULL);
}
4993
df95181f
TBA
4994void
4995linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4996 lwp_info *except)
7984d532 4997{
d86d4aaf 4998 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
4999
5000 if (lwp == except)
e2b44075 5001 return;
7984d532 5002
863d01bd 5003 lwp_suspended_decr (lwp);
7984d532 5004
e2b44075 5005 proceed_one_lwp (thread, except);
d50171e4
PA
5006}
5007
d16f3f6c
TBA
/* Resume every stopped LWP that may run — unless some thread first
   needs to step over a breakpoint, in which case only that step-over
   is started and everything else stays stopped until it finishes.  */

void
linux_process_target::proceed_all_lwps ()
{
  struct thread_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (low_supports_breakpoints ())
    {
      need_step_over = find_thread ([this] (thread_info *thread)
	{
	  return thread_needs_step_over (thread);
	});

      if (need_step_over != NULL)
	{
	  if (debug_threads)
	    debug_printf ("proceed_all_lwps: found "
			  "thread %ld needing a step-over\n",
			  lwpid_of (need_step_over));

	  start_step_over (get_thread_lwp (need_step_over));
	  return;
	}
    }

  if (debug_threads)
    debug_printf ("Proceeding, no step-over needed\n");

  for_each_thread ([this] (thread_info *thread)
    {
      proceed_one_lwp (thread, NULL);
    });
}
5045
d16f3f6c
TBA
5046void
5047linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
d50171e4 5048{
5544ad89
DJ
5049 if (debug_threads)
5050 {
87ce2a04 5051 debug_enter ();
d50171e4 5052 if (except)
87ce2a04 5053 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
d86d4aaf 5054 lwpid_of (get_lwp_thread (except)));
5544ad89 5055 else
87ce2a04 5056 debug_printf ("unstopping all lwps\n");
5544ad89
DJ
5057 }
5058
7984d532 5059 if (unsuspend)
e2b44075
SM
5060 for_each_thread ([&] (thread_info *thread)
5061 {
5062 unsuspend_and_proceed_one_lwp (thread, except);
5063 });
7984d532 5064 else
e2b44075
SM
5065 for_each_thread ([&] (thread_info *thread)
5066 {
5067 proceed_one_lwp (thread, except);
5068 });
87ce2a04
DE
5069
5070 if (debug_threads)
5071 {
5072 debug_printf ("unstop_all_lwps done\n");
5073 debug_exit ();
5074 }
0d62e5e8
DJ
5075}
5076
58caa3dc
DJ
5077
5078#ifdef HAVE_LINUX_REGSETS
5079
1faeff08
MR
5080#define use_linux_regsets 1
5081
030031ee
PA
5082/* Returns true if REGSET has been disabled. */
5083
5084static int
5085regset_disabled (struct regsets_info *info, struct regset_info *regset)
5086{
5087 return (info->disabled_regsets != NULL
5088 && info->disabled_regsets[regset - info->regsets]);
5089}
5090
5091/* Disable REGSET. */
5092
5093static void
5094disable_regset (struct regsets_info *info, struct regset_info *regset)
5095{
5096 int dr_offset;
5097
5098 dr_offset = regset - info->regsets;
5099 if (info->disabled_regsets == NULL)
224c3ddb 5100 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
030031ee
PA
5101 info->disabled_regsets[dr_offset] = 1;
5102}
5103
/* Fetch all registers covered by REGSETS_INFO from the current thread
   via ptrace regset requests and store them into REGCACHE.  Returns 0
   if a GENERAL_REGS regset was successfully read (so no PEEKUSER
   fallback is needed), 1 otherwise.  */

static int
regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || regset_disabled (regsets_info, regset))
	continue;

      buf = xmalloc (regset->size);

      /* NT_-typed regsets go through PTRACE_GETREGSET-style calls,
	 which take an iovec; legacy regsets take the buffer itself.  */
      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ENODATA)
	    {
	      /* ENODATA may be returned if the regset is currently
		 not "active".  This can happen in normal operation,
		 so suppress the warning in this case.  */
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to read its registers.  */
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else
	{
	  if (regset->type == GENERAL_REGS)
	    saw_general_regs = 1;
	  regset->store_function (regcache, buf);
	}
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5182
/* Write all registers covered by REGSETS_INFO from REGCACHE back to
   the current thread via ptrace regset requests.  For each regset the
   kernel's current contents are read first, overlaid with the cached
   registers, then written back, so kernel-only fields survive.
   Returns 0 if a GENERAL_REGS regset was written (no POKEUSER
   fallback needed), 1 otherwise.  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip empty, disabled, or read-only (no fill_function) regsets.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5271
1faeff08 5272#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5273
1faeff08 5274#define use_linux_regsets 0
3aee8918
PA
5275#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5276#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5277
58caa3dc 5278#endif
1faeff08
MR
5279
5280/* Return 1 if register REGNO is supported by one of the regset ptrace
5281 calls or 0 if it has to be transferred individually. */
5282
5283static int
3aee8918 5284linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5285{
5286 unsigned char mask = 1 << (regno % 8);
5287 size_t index = regno / 8;
5288
5289 return (use_linux_regsets
3aee8918
PA
5290 && (regs_info->regset_bitmap == NULL
5291 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5292}
5293
58caa3dc 5294#ifdef HAVE_LINUX_USRREGS
1faeff08 5295
5b3da067 5296static int
3aee8918 5297register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5298{
5299 int addr;
5300
3aee8918 5301 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5302 error ("Invalid register number %d.", regnum);
5303
3aee8918 5304 addr = usrregs->regmap[regnum];
1faeff08
MR
5305
5306 return addr;
5307}
5308
daca57a7
TBA
5309
/* Fetch one register, REGNO, from the current thread with
   PTRACE_PEEKUSER, one PTRACE_XFER_TYPE word at a time, and supply it
   to REGCACHE.  Silently does nothing for registers outside USRREGS,
   ones the low target cannot fetch, or ones with no user-area offset;
   on a ptrace error the register is supplied as unavailable.  */

void
linux_process_target::fetch_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_fetch_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	{
	  /* Mark register REGNO unavailable.  */
	  supply_register (regcache, regno, NULL);
	  return;
	}
    }

  low_supply_ptrace_register (regcache, regno, buf);
}
5353
daca57a7
TBA
/* Write one register, REGNO, from REGCACHE to the current thread with
   PTRACE_POKEUSER, one PTRACE_XFER_TYPE word at a time.  Silently
   does nothing for registers outside USRREGS, ones the low target
   cannot store, or ones with no user-area offset.  */

void
linux_process_target::store_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_store_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words;
     the trailing padding is zero-filled below.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);
  memset (buf, 0, size);

  low_collect_ptrace_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;


	  if (!low_cannot_store_register (regno))
	    error ("writing register %d: %s", regno, safe_strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
daca57a7 5405#endif /* HAVE_LINUX_USRREGS */
1faeff08 5406
b35db733
TBA
/* Default implementation: collect REGNO from REGCACHE into BUF with
   no target-specific massaging; archs may override this.  */

void
linux_process_target::low_collect_ptrace_register (regcache *regcache,
						   int regno, char *buf)
{
  collect_register (regcache, regno, buf);
}
5413
/* Default implementation: supply BUF to REGCACHE as register REGNO
   with no target-specific massaging; archs may override this.  */

void
linux_process_target::low_supply_ptrace_register (regcache *regcache,
						  int regno, const char *buf)
{
  supply_register (regcache, regno, buf);
}
5420
daca57a7
TBA
/* Fetch register REGNO (or all registers, if REGNO is -1) via
   PTRACE_PEEKUSER.  When REGNO is -1 and ALL is zero, registers
   already covered by a regset are skipped.  A no-op on hosts without
   HAVE_LINUX_USRREGS.  */

void
linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
						    regcache *regcache,
						    int regno, int all)
{
#ifdef HAVE_LINUX_USRREGS
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
	if (all || !linux_register_in_regsets (regs_info, regno))
	  fetch_register (usr, regcache, regno);
    }
  else
    fetch_register (usr, regcache, regno);
#endif
}
5439
daca57a7
TBA
/* Store register REGNO (or all registers, if REGNO is -1) via
   PTRACE_POKEUSER.  When REGNO is -1 and ALL is zero, registers
   already covered by a regset are skipped.  A no-op on hosts without
   HAVE_LINUX_USRREGS.  */

void
linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
						    regcache *regcache,
						    int regno, int all)
{
#ifdef HAVE_LINUX_USRREGS
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
	if (all || !linux_register_in_regsets (regs_info, regno))
	  store_register (usr, regcache, regno);
    }
  else
    store_register (usr, regcache, regno);
#endif
}
1faeff08 5458
a5a4d4cd
TBA
/* The "fetch_registers" target op: read register REGNO (or all
   registers, if REGNO is -1) into REGCACHE, preferring the low
   target's own fetch hook, then regsets, then the PEEKUSER fallback
   for whatever the regsets did not cover.  */

void
linux_process_target::fetch_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      /* Give the low target a first shot at every register.  */
      if (regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  low_fetch_register (regcache, regno);

      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      if (low_fetch_register (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5489
a5a4d4cd
TBA
/* The "store_registers" target op: write register REGNO (or all
   registers, if REGNO is -1) from REGCACHE to the inferior, using
   regsets first and the POKEUSER fallback for whatever the regsets
   did not cover.  */

void
linux_process_target::store_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      all = regsets_store_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5514
bd70b1f2
TBA
/* Default implementation: no target-specific register fetching;
   returning false tells fetch_registers that REGNO was not handled
   here and the generic paths should be used.  */

bool
linux_process_target::low_fetch_register (regcache *regcache, int regno)
{
  return false;
}
da6d8c04 5520
/* A wrapper for the read_memory target op, usable as a plain function
   pointer; forwards to the current target's method.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  return the_target->read_memory (memaddr, myaddr, len);
}
5528
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Tries the fast
   /proc/PID/mem path first for reads of three words or more, then
   falls back to word-at-a-time PTRACE_PEEKTEXT for whatever remains.
   Returns 0 on success, otherwise the errno of the failing ptrace
   call; partially-read bytes are still copied to MYADDR.  */

int
linux_process_target::read_memory (CORE_ADDR memaddr,
				   unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_thread);
  PTRACE_XFER_TYPE *buffer;
  CORE_ADDR addr;
  int count;
  char filename[64];
  int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
	bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
	return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
	{
	  memaddr += bytes;
	  myaddr += bytes;
	  len -= bytes;
	}
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	   / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);

  /* Read all the longwords */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
			  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
	      (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	      i < len ? i : len);
    }

  return ret;
}
5617
93ae6fdc
PA
/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
   memory at MEMADDR.  On failure (cannot write to the inferior)
   returns the value of errno.  Always succeeds if LEN is zero.  */

int
linux_process_target::write_memory (CORE_ADDR memaddr,
				    const unsigned char *myaddr, int len)
{
  int i;
  /* Round starting address down to longword boundary.  */
  CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
    / sizeof (PTRACE_XFER_TYPE);

  /* Allocate buffer of that many longwords.  */
  PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);

  int pid = lwpid_of (current_thread);

  if (len == 0)
    {
      /* Zero length write always succeeds.  */
      return 0;
    }

  if (debug_threads)
    {
      /* Dump up to four bytes.  */
      char str[4 * 2 + 1];
      char *p = str;
      int dump = len < 4 ? len : 4;

      for (i = 0; i < dump; i++)
	{
	  sprintf (p, "%02x", myaddr[i]);
	  p += 2;
	}
      *p = '\0';

      debug_printf ("Writing %s to 0x%08lx in process %d\n",
		    str, (long) memaddr, pid);
    }

  /* Fill start and end extra bytes of buffer with existing memory data.
     Only the first and last words can be partially covered by MYADDR,
     so those are the only ones we need to pre-read.  */

  errno = 0;
  /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
     about coercing an 8 byte integer to a 4 byte pointer.  */
  buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
		      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
		      (PTRACE_TYPE_ARG4) 0);
  if (errno)
    return errno;

  if (count > 1)
    {
      errno = 0;
      buffer[count - 1]
	= ptrace (PTRACE_PEEKTEXT, pid,
		  /* Coerce to a uintptr_t first to avoid potential gcc warning
		     about coercing an 8 byte integer to a 4 byte pointer.  */
		  (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
						  * sizeof (PTRACE_XFER_TYPE)),
		  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	return errno;
    }

  /* Copy data to be written over corresponding part of buffer.  */

  memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  myaddr, len);

  /* Write the entire buffer.  */

  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKETEXT, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
	      (PTRACE_TYPE_ARG4) buffer[i]);
      if (errno)
	return errno;
    }

  return 0;
}
2f2893d9 5709
2a31c7aa
TBA
/* Hook up libthread_db for the current process, if it is not already
   initialized.  No-op when gdbserver is built without thread_db.  */

void
linux_process_target::look_up_symbols ()
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process; nothing to do.  */
  if (proc->priv->thread_db != NULL)
    return;

  thread_db_init ();
#endif
}
5722
eb497a2a
TBA
void
linux_process_target::request_interrupt ()
{
  /* Send a SIGINT to the process group.  This acts just like the user
     typed a ^C on the controlling terminal.  */
  ::kill (-signal_pid, SIGINT);
}
5730
eac215cc
TBA
/* Auxv is always readable on Linux (see read_auxv, which uses
   /proc/PID/auxv).  */

bool
linux_process_target::supports_read_auxv ()
{
  return true;
}
5736
aa691b87
RM
5737/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5738 to debugger memory starting at MYADDR. */
5739
eac215cc
TBA
5740int
5741linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5742 unsigned int len)
aa691b87
RM
5743{
5744 char filename[PATH_MAX];
5745 int fd, n;
0bfdf32f 5746 int pid = lwpid_of (current_thread);
aa691b87 5747
6cebaf6e 5748 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5749
5750 fd = open (filename, O_RDONLY);
5751 if (fd < 0)
5752 return -1;
5753
5754 if (offset != (CORE_ADDR) 0
5755 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5756 n = -1;
5757 else
5758 n = read (fd, myaddr, len);
5759
5760 close (fd);
5761
5762 return n;
5763}
5764
7e0bde70
TBA
/* Insert a breakpoint/watchpoint of kind TYPE at ADDR.  Software
   breakpoints are handled generically by poking memory; everything
   else is delegated to the arch-specific low_insert_point hook.  */

int
linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
				    int size, raw_breakpoint *bp)
{
  if (type == raw_bkpt_type_sw)
    return insert_memory_breakpoint (bp);
  else
    return low_insert_point (type, addr, size, bp);
}
5774
/* Default implementation; architectures with hardware breakpoint or
   watchpoint support override this.  */

int
linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}
5782
7e0bde70
TBA
/* Remove a breakpoint/watchpoint of kind TYPE at ADDR; the mirror of
   insert_point above.  */

int
linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
				    int size, raw_breakpoint *bp)
{
  if (type == raw_bkpt_type_sw)
    return remove_memory_breakpoint (bp);
  else
    return low_remove_point (type, addr, size, bp);
}
5792
/* Default implementation; overridden by architectures with hardware
   breakpoint/watchpoint support.  */

int
linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}
5800
84320c4e 5801/* Implement the stopped_by_sw_breakpoint target_ops
3e572f71
PA
5802 method. */
5803
84320c4e
TBA
5804bool
5805linux_process_target::stopped_by_sw_breakpoint ()
3e572f71
PA
5806{
5807 struct lwp_info *lwp = get_thread_lwp (current_thread);
5808
5809 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5810}
5811
84320c4e 5812/* Implement the supports_stopped_by_sw_breakpoint target_ops
3e572f71
PA
5813 method. */
5814
84320c4e
TBA
5815bool
5816linux_process_target::supports_stopped_by_sw_breakpoint ()
3e572f71
PA
5817{
5818 return USE_SIGTRAP_SIGINFO;
5819}
5820
93fe88b2 5821/* Implement the stopped_by_hw_breakpoint target_ops
3e572f71
PA
5822 method. */
5823
93fe88b2
TBA
5824bool
5825linux_process_target::stopped_by_hw_breakpoint ()
3e572f71
PA
5826{
5827 struct lwp_info *lwp = get_thread_lwp (current_thread);
5828
5829 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5830}
5831
93fe88b2 5832/* Implement the supports_stopped_by_hw_breakpoint target_ops
3e572f71
PA
5833 method. */
5834
93fe88b2
TBA
5835bool
5836linux_process_target::supports_stopped_by_hw_breakpoint ()
3e572f71
PA
5837{
5838 return USE_SIGTRAP_SIGINFO;
5839}
5840
70b90b91 5841/* Implement the supports_hardware_single_step target_ops method. */
45614f15 5842
22aa6223
TBA
5843bool
5844linux_process_target::supports_hardware_single_step ()
45614f15 5845{
b31cdfa6 5846 return true;
45614f15
YQ
5847}
5848
6eeb5c55
TBA
/* Return true if the current thread last stopped because a
   watchpoint triggered.  */

bool
linux_process_target::stopped_by_watchpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}
5856
6eeb5c55
TBA
/* Return the data address that triggered the last watchpoint stop,
   as recorded on the LWP.  */

CORE_ADDR
linux_process_target::stopped_data_address ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stopped_data_address;
}
5864
db0dfaa0
LM
/* This is only used for targets that define PT_TEXT_ADDR,
   PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
   the target has different ways of acquiring this information, like
   loadmaps.  */

bool
linux_process_target::supports_read_offsets ()
{
#ifdef SUPPORTS_READ_OFFSETS
  return true;
#else
  return false;
#endif
}
5879
52fb6437
NS
/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Returns 1 on success and fills *TEXT_P/*DATA_P,
   0 on failure.  */

int
linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#ifdef SUPPORTS_READ_OFFSETS
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  errno = 0;

  /* Fetch the runtime segment addresses from the user area.  */
  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
#else
  gdb_assert_not_reached ("target op read_offsets not supported");
#endif
}
52fb6437 5920
6e3fd7e9
TBA
/* TLS address lookup is only available through libthread_db.  */

bool
linux_process_target::supports_get_tls_address ()
{
#ifdef USE_THREAD_DB
  return true;
#else
  return false;
#endif
}
5930
/* Resolve the TLS block address for THREAD at OFFSET within
   LOAD_MODULE; delegates to thread_db.  Returns -1 when built
   without thread_db support.  */

int
linux_process_target::get_tls_address (thread_info *thread,
				       CORE_ADDR offset,
				       CORE_ADDR load_module,
				       CORE_ADDR *address)
{
#ifdef USE_THREAD_DB
  return thread_db_get_tls_address (thread, offset, load_module, address);
#else
  return -1;
#endif
}
5943
2d0795ee
TBA
bool
linux_process_target::supports_qxfer_osdata ()
{
  return true;
}
5949
/* Serve qXfer:osdata reads via the shared linux-osdata code.
   Writes are not supported; WRITEBUF is ignored.  */

int
linux_process_target::qxfer_osdata (const char *annex,
				    unsigned char *readbuf,
				    unsigned const char *writebuf,
				    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
5958
cb63de7c
TBA
/* Convert a siginfo between the ptrace (native) layout and the layout
   the inferior expects.  DIRECTION == 1 copies INF_SIGINFO into
   SIGINFO; any other value copies SIGINFO into INF_SIGINFO.  The
   arch-specific low_siginfo_fixup hook gets first chance to do a real
   layout conversion (e.g. 64-bit gdbserver, 32-bit inferior).  */

void
linux_process_target::siginfo_fixup (siginfo_t *siginfo,
				     gdb_byte *inf_siginfo, int direction)
{
  bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);

  /* If there was no callback, or the callback didn't do anything,
     then just do a straight memcpy.  */
  if (!done)
    {
      if (direction == 1)
	memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
      else
	memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
    }
}
5975
cb63de7c
TBA
/* Default: no layout conversion needed; returning false makes the
   caller fall back to a plain memcpy.  */

bool
linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
					 int direction)
{
  return false;
}
5982
d7abedf7
TBA
bool
linux_process_target::supports_qxfer_siginfo ()
{
  return true;
}
5988
/* Serve qXfer:siginfo reads/writes for the current thread.  Exactly
   one of READBUF/WRITEBUF is non-NULL.  Returns the number of bytes
   transferred, or -1 on error.  */

int
linux_process_target::qxfer_siginfo (const char *annex,
				     unsigned char *readbuf,
				     unsigned const char *writebuf,
				     CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  if (debug_threads)
    debug_printf ("%s siginfo for lwp %d.\n",
		  readbuf != NULL ? "Reading" : "Writing",
		  pid);

  if (offset >= sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the size of siginfo_t.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
6039
bd99dc85
PA
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.  */

static void
sigchld_handler (int signo)
{
  /* Preserve errno: a signal handler must not clobber the
     interrupted code's errno.  */
  int old_errno = errno;

  if (debug_threads)
    {
      do
	{
	  /* Use the async signal safe debug function.  */
	  if (debug_write ("sigchld_handler\n",
			   sizeof ("sigchld_handler\n") - 1) < 0)
	    break; /* just ignore */
	} while (0);
    }

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}
6065
0dc587d4
TBA
bool
linux_process_target::supports_non_stop ()
{
  return true;
}
6071
0dc587d4
TBA
/* Enable or disable async (non-blocking) mode.  Creates or tears
   down the SIGCHLD event pipe and its event-loop registration.
   Returns the previous async state; on pipe-creation failure the
   state is left unchanged and the old value is returned.  */

bool
linux_process_target::async (bool enable)
{
  bool previous = target_is_async_p ();

  if (debug_threads)
    debug_printf ("linux_async (%d), previous=%d\n",
		  enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      /* Block SIGCHLD while flipping state so sigchld_handler never
	 sees a half-set-up event pipe.  */
      gdb_sigmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    {
	      linux_event_pipe[0] = -1;
	      linux_event_pipe[1] = -1;
	      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL,
			    "linux-low");

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
6127
0dc587d4
TBA
/* Switch async mode on/off to match NONSTOP.  Returns 0 on success,
   -1 if the async state could not be changed.  */

int
linux_process_target::start_non_stop (bool nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  target_async (nonstop);

  if (target_is_async_p () != (nonstop != false))
    return -1;

  return 0;
}
6139
652aef77
TBA
bool
linux_process_target::supports_multi_process ()
{
  return true;
}
6145
89245bc0
DB
/* Check if fork events are supported.  */

bool
linux_process_target::supports_fork_events ()
{
  return linux_supports_tracefork ();
}
6153
/* Check if vfork events are supported.  */

bool
linux_process_target::supports_vfork_events ()
{
  return linux_supports_tracefork ();
}
6161
94585166
DB
/* Check if exec events are supported.  */

bool
linux_process_target::supports_exec_events ()
{
  return linux_supports_traceexec ();
}
6169
de0d863e
DB
/* Target hook for 'handle_new_gdb_connection'.  Causes a reset of the
   ptrace flags for all inferiors.  This is in case the new GDB connection
   doesn't support the same set of events that the previous one did.  */

void
linux_process_target::handle_new_gdb_connection ()
{
  /* Request that all the lwps reset their ptrace options.  */
  for_each_thread ([] (thread_info *thread)
    {
      struct lwp_info *lwp = get_thread_lwp (thread);

      if (!lwp->stopped)
	{
	  /* Stop the lwp so we can modify its ptrace options.  The
	     flags are applied later, once it reports the stop.  */
	  lwp->must_set_ptrace_flags = 1;
	  linux_stop_lwp (lwp);
	}
      else
	{
	  /* Already stopped; go ahead and set the ptrace options.  */
	  struct process_info *proc = find_process_pid (pid_of (thread));
	  int options = linux_low_ptrace_options (proc->attached);

	  linux_enable_event_reporting (lwpid_of (thread), options);
	  lwp->must_set_ptrace_flags = 0;
	}
    });
}
6199
55cf3021
TBA
/* Pass 'monitor' commands through to thread_db when available;
   otherwise report them unhandled (0).  */

int
linux_process_target::handle_monitor_command (char *mon)
{
#ifdef USE_THREAD_DB
  return thread_db_handle_monitor_command (mon);
#else
  return 0;
#endif
}
6209
95a45fc1
TBA
/* Return the CPU core PTID last ran on (shared nat/linux code).  */

int
linux_process_target::core_of_thread (ptid_t ptid)
{
  return linux_common_core_of_thread (ptid);
}
6215
c756403b
TBA
bool
linux_process_target::supports_disable_randomization ()
{
  return true;
}
efcbbd14 6221
c0245cb9
TBA
bool
linux_process_target::supports_agent ()
{
  return true;
}
6227
2526e0cd
TBA
/* Range stepping is trivially available when stepping is emulated in
   software; otherwise defer to the arch-specific hook.  */

bool
linux_process_target::supports_range_stepping ()
{
  if (supports_software_single_step ())
    return true;

  return low_supports_range_stepping ();
}
6236
/* Default: no hardware range stepping; architectures override.  */

bool
linux_process_target::low_supports_range_stepping ()
{
  return false;
}
6242
8247b823
TBA
bool
linux_process_target::supports_pid_to_exec_file ()
{
  return true;
}
6248
/* Return the executable path of PID (via /proc, see linux-procfs).  */

const char *
linux_process_target::pid_to_exec_file (int pid)
{
  return linux_proc_pid_to_exec_file (pid);
}
6254
c9b7b804
TBA
bool
linux_process_target::supports_multifs ()
{
  return true;
}
6260
/* open(2) within PID's mount namespace (close-on-exec).  */

int
linux_process_target::multifs_open (int pid, const char *filename,
				    int flags, mode_t mode)
{
  return linux_mntns_open_cloexec (pid, filename, flags, mode);
}
6267
/* unlink(2) within PID's mount namespace.  */

int
linux_process_target::multifs_unlink (int pid, const char *filename)
{
  return linux_mntns_unlink (pid, filename);
}
6273
/* readlink(2) within PID's mount namespace.  */

ssize_t
linux_process_target::multifs_readlink (int pid, const char *filename,
					char *buf, size_t bufsiz)
{
  return linux_mntns_readlink (pid, filename, buf, bufsiz);
}
6280
723b724b 6281#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
78d85199
YQ
6282struct target_loadseg
6283{
6284 /* Core address to which the segment is mapped. */
6285 Elf32_Addr addr;
6286 /* VMA recorded in the program header. */
6287 Elf32_Addr p_vaddr;
6288 /* Size of this segment in memory. */
6289 Elf32_Word p_memsz;
6290};
6291
723b724b 6292# if defined PT_GETDSBT
78d85199
YQ
6293struct target_loadmap
6294{
6295 /* Protocol version number, must be zero. */
6296 Elf32_Word version;
6297 /* Pointer to the DSBT table, its size, and the DSBT index. */
6298 unsigned *dsbt_table;
6299 unsigned dsbt_size, dsbt_index;
6300 /* Number of segments in this map. */
6301 Elf32_Word nsegs;
6302 /* The actual memory map. */
6303 struct target_loadseg segs[/*nsegs*/];
6304};
723b724b
MF
6305# define LINUX_LOADMAP PT_GETDSBT
6306# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6307# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6308# else
6309struct target_loadmap
6310{
6311 /* Protocol version number, must be zero. */
6312 Elf32_Half version;
6313 /* Number of segments in this map. */
6314 Elf32_Half nsegs;
6315 /* The actual memory map. */
6316 struct target_loadseg segs[/*nsegs*/];
6317};
6318# define LINUX_LOADMAP PTRACE_GETFDPIC
6319# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6320# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6321# endif
78d85199 6322
9da41fda
TBA
bool
linux_process_target::supports_read_loadmap ()
{
  return true;
}
6328
/* Read the kernel load map for the "exec" or "interp" objects into
   MYADDR, starting at OFFSET, at most LEN bytes.  Returns the number
   of bytes copied, or -1 on error/unknown annex.  */

int
linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
				    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  /* NOTE(review): OFFSET is an unsigned CORE_ADDR, so the "offset < 0"
     arm looks vacuous — confirm before removing.  */
  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
723b724b 6361#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6362
bc8d3ae4
TBA
/* Syscall catching needs both arch support and kernel
   PTRACE_O_TRACESYSGOOD support.  */

bool
linux_process_target::supports_catch_syscall ()
{
  return (low_supports_catch_syscall ()
	  && linux_supports_tracesysgood ());
}
6369
9eedd27d
TBA
/* Default: no arch support for catching syscalls; overridden per
   architecture.  */

bool
linux_process_target::low_supports_catch_syscall ()
{
  return false;
}
6375
770d8f6a
TBA
/* Return the PC from REGCACHE, or 0 if this architecture does not
   expose PC get/set hooks.  */

CORE_ADDR
linux_process_target::read_pc (regcache *regcache)
{
  if (!low_supports_breakpoints ())
    return 0;

  return low_get_pc (regcache);
}
6384
770d8f6a
TBA
/* Store PC into REGCACHE.  Requires the architecture to provide the
   PC hooks (asserted).  */

void
linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (low_supports_breakpoints ());

  low_set_pc (regcache, pc);
}
6392
68119632
TBA
bool
linux_process_target::supports_thread_stopped ()
{
  return true;
}
6398
/* Return true if THREAD's LWP is currently stopped.  */

bool
linux_process_target::thread_stopped (thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}
6404
/* This exposes stop-all-threads functionality to other modules.  */

void
linux_process_target::pause_all (bool freeze)
{
  stop_all_lwps (freeze, NULL);
}
6412
/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  */

void
linux_process_target::unpause_all (bool unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}
6421
79b44087
TBA
/* Stop all threads before a memory access in non-stop mode.
   Always returns 0 (success).  */

int
linux_process_target::prepare_to_access_memory ()
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    target_pause_all (true);
  return 0;
}
6431
79b44087
TBA
/* Undo prepare_to_access_memory: resume all threads in non-stop
   mode.  */

void
linux_process_target::done_accessing_memory ()
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    target_unpause_all (true);
}
6440
2268b414
JK
/* Extract &phdr and num_phdr in the inferior.  Return 0 on success,
   1 if /proc/PID/auxv cannot be opened, 2 if AT_PHDR/AT_PHNUM are
   missing from the auxv.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  *phdr_memaddr = 0;
  *num_phdr = 0;
  /* Scan auxv entries until both AT_PHDR and AT_PHNUM are found.  */
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
6506
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  Derived from the PT_PHDR entry: runtime
     address minus link-time p_vaddr.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
6580
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  /* Walk the dynamic section one entry at a time until DT_NULL or a
     read failure.  */
  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	  {
	    Elf64_Xword map;
	    unsigned char buf[sizeof (Elf64_Xword)];
	  }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      /* The _REL variant stores an offset relative to the
		 address of the dynamic entry itself.  */
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	  {
	    Elf32_Word map;
	    unsigned char buf[sizeof (Elf32_Word)];
	  }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
6682
6683/* Read one pointer from MEMADDR in the inferior. */
6684
6685static int
6686read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6687{
485f1ee4
PA
6688 int ret;
6689
6690 /* Go through a union so this works on either big or little endian
6691 hosts, when the inferior's pointer size is smaller than the size
6692 of CORE_ADDR. It is assumed the inferior's endianness is the
6693 same of the superior's. */
6694 union
6695 {
6696 CORE_ADDR core_addr;
6697 unsigned int ui;
6698 unsigned char uc;
6699 } addr;
6700
6701 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6702 if (ret == 0)
6703 {
6704 if (ptr_size == sizeof (CORE_ADDR))
6705 *ptr = addr.core_addr;
6706 else if (ptr_size == sizeof (unsigned int))
6707 *ptr = addr.ui;
6708 else
6709 gdb_assert_not_reached ("unhandled pointer size");
6710 }
6711 return ret;
2268b414
JK
6712}
6713
974387bb
TBA
/* Implementation of the target_ops method
   "supports_qxfer_libraries_svr4".  The Linux target always supports
   the qXfer:libraries-svr4:read packet; see qxfer_libraries_svr4
   below for the actual implementation.  */

bool
linux_process_target::supports_qxfer_libraries_svr4 ()
{
  return true;
}
6719
2268b414
JK
/* Byte offsets of the fields we need within the inferior's
   `struct r_debug' and `struct link_map', for one particular pointer
   size.  Two instances (32-bit and 64-bit) are defined in
   qxfer_libraries_svr4 below.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6743
/* Construct qXfer:libraries-svr4:read reply.

   ANNEX may carry "start=ADDR" and/or "prev=ADDR" hints from GDB,
   giving the link_map entry to start the walk from and its expected
   predecessor; unknown name=value pairs are skipped.  Only reads are
   supported: returns -2 if WRITEBUF is non-NULL and -1 if READBUF is
   NULL.  On success, copies up to LEN bytes of the generated XML
   document, starting at OFFSET, into READBUF and returns the number
   of bytes copied.  */

int
linux_process_target::qxfer_libraries_svr4 (const char *annex,
					    unsigned char *readbuf,
					    unsigned const char *writebuf,
					    CORE_ADDR offset, int len)
{
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;

  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset.  */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Pick the offsets table matching the bitness of the inferior's
     executable.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

  /* Parse the annex for "start" and "prev" address hints; anything
     else is skipped up to the next ';'.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int name_len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      name_len = sep - annex;
      if (name_len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (name_len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  /* No start address supplied: locate the head of the link_map list
     through the inferior's r_debug structure.  */
  if (lm_addr == 0)
    {
      int r_version = 0;

      if (priv->r_debug == 0)
	priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (priv->r_debug == (CORE_ADDR) -1)
	return -1;

      if (priv->r_debug != 0)
	{
	  if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0
	      || r_version < 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	    }
	  else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
				 &lm_addr, ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%lx",
		       (long) priv->r_debug + lmo->r_map_offset);
	    }
	}
    }

  std::string document = "<library-list-svr4 version=\"1.0\"";

  /* Walk the link_map list; each iteration reads the fields of one
     entry and, when it has a readable name, appends a <library>
     element.  Stops on any read failure or when l_next is NULL.  */
  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      /* Sanity check: the entry's back-pointer must match the entry
	 we arrived from.  */
      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		   (long) lm_prev, (long) l_prev);
	  break;
	}

      /* Ignore the first entry even if it has valid name as the first entry
	 corresponds to the main executable.  The first entry should not be
	 skipped if the dynamic loader was loaded late by a static executable
	 (see solib-svr4.c parameter ignore_first).  But in such case the main
	 executable does not have PT_DYNAMIC present and this function already
	 exited above due to failed get_r_debug.  */
      if (lm_prev == 0)
	string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
      else
	{
	  /* Not checking for error because reading may stop before
	     we've got PATH_MAX worth of characters.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  document += '>';
		  header_done = 1;
		}

	      string_appendf (document, "<library name=\"");
	      xml_escape_text_append (&document, (char *) libname);
	      string_appendf (document, "\" lm=\"0x%lx\" "
			      "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			      (unsigned long) lm_addr, (unsigned long) l_addr,
			      (unsigned long) l_ld);
	    }
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      document += "/>";
    }
  else
    document += "</library-list-svr4>";

  /* Clamp the requested window [OFFSET, OFFSET+LEN) to the document
     and copy that slice out.  */
  int document_len = document.length ();
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document.data () + offset, len);

  return len;
}
6933
9accd112
MM
6934#ifdef HAVE_LINUX_BTRACE
6935
79597bdd
TBA
/* Implementation of the enable_btrace target method.  Delegates
   directly to linux_enable_btrace.  */

btrace_target_info *
linux_process_target::enable_btrace (ptid_t ptid,
				     const btrace_config *conf)
{
  return linux_enable_btrace (ptid, conf);
}
6942
969c39fb 6943/* See to_disable_btrace target method. */
9accd112 6944
79597bdd
TBA
6945int
6946linux_process_target::disable_btrace (btrace_target_info *tinfo)
969c39fb
MM
6947{
6948 enum btrace_error err;
6949
6950 err = linux_disable_btrace (tinfo);
6951 return (err == BTRACE_ERR_NONE ? 0 : -1);
6952}
6953
/* Encode an Intel Processor Trace configuration as a <pt-config> XML
   element appended to BUFFER.  Only a GenuineIntel CPU produces a
   nested <cpu> element; any other vendor yields an empty
   <pt-config>.  */

static void
linux_low_encode_pt_config (struct buffer *buffer,
			    const struct btrace_data_pt_config *config)
{
  buffer_grow_str (buffer, "<pt-config>\n");

  switch (config->cpu.vendor)
    {
    case CV_INTEL:
      buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
			 "model=\"%u\" stepping=\"%u\"/>\n",
			 config->cpu.family, config->cpu.model,
			 config->cpu.stepping);
      break;

    default:
      break;
    }

  buffer_grow_str (buffer, "</pt-config>\n");
}
6977
6978/* Encode a raw buffer. */
6979
6980static void
6981linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6982 unsigned int size)
6983{
6984 if (size == 0)
6985 return;
6986
268a13a5 6987 /* We use hex encoding - see gdbsupport/rsp-low.h. */
b20a6524
MM
6988 buffer_grow_str (buffer, "<raw>\n");
6989
6990 while (size-- > 0)
6991 {
6992 char elem[2];
6993
6994 elem[0] = tohex ((*data >> 4) & 0xf);
6995 elem[1] = tohex (*data++ & 0xf);
6996
6997 buffer_grow (buffer, elem, 2);
6998 }
6999
7000 buffer_grow_str (buffer, "</raw>\n");
7001}
7002
969c39fb
MM
/* See to_read_btrace target method.

   Read branch trace data of kind TYPE from TINFO and append it to
   BUFFER, either as a <btrace> XML document or as an "E."-prefixed
   error string.  Returns 0 on success, -1 on failure.  */

int
linux_process_target::read_btrace (btrace_target_info *tinfo,
				   buffer *buffer,
				   enum btrace_read_type type)
{
  struct btrace_data btrace;
  enum btrace_error err;

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      return -1;
    }

  /* Encode according to the trace format that was collected.  */
  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      buffer_grow_str0 (buffer, "E.No Trace.");
      return -1;

    case BTRACE_FORMAT_BTS:
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

      /* One <block> element per branch trace block.  */
      for (const btrace_block &block : *btrace.variant.bts.blocks)
	buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			   paddress (block.begin), paddress (block.end));

      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    case BTRACE_FORMAT_PT:
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
      buffer_grow_str (buffer, "<pt>\n");

      /* Emit the CPU configuration followed by the raw, hex-encoded
	 trace bytes.  */
      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      buffer_grow_str (buffer, "</pt>\n");
      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    default:
      buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
      return -1;
    }

  return 0;
}
f4abbc16
MM
7062
/* See to_btrace_conf target method.

   Append the branch trace configuration of TINFO to BUFFER as a
   <btrace-conf> XML document.  Always returns 0; an unknown or NULL
   configuration simply yields an empty <btrace-conf> element.  */

int
linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
					buffer *buffer)
{
  const struct btrace_config *conf;

  buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
  buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");

  conf = linux_btrace_conf (tinfo);
  if (conf != NULL)
    {
      switch (conf->format)
	{
	case BTRACE_FORMAT_NONE:
	  break;

	case BTRACE_FORMAT_BTS:
	  buffer_xml_printf (buffer, "<bts");
	  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
	  buffer_xml_printf (buffer, " />\n");
	  break;

	case BTRACE_FORMAT_PT:
	  buffer_xml_printf (buffer, "<pt");
	  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
	  buffer_xml_printf (buffer, "/>\n");
	  break;
	}
    }

  buffer_grow_str0 (buffer, "</btrace-conf>\n");
  return 0;
}
9accd112
MM
7099#endif /* HAVE_LINUX_BTRACE */
7100
7b669087
GB
/* See nat/linux-nat.h.

   Return the ptid of the LWP that current_thread refers to.  */

ptid_t
current_lwp_ptid (void)
{
  return ptid_of (current_thread);
}
7108
7f63b89b
TBA
/* Implementation of the thread_name target method.  Delegates to
   linux_proc_tid_get_name to obtain the name of thread THREAD.  */

const char *
linux_process_target::thread_name (ptid_t thread)
{
  return linux_proc_tid_get_name (thread);
}
7114
#if USE_THREAD_DB
/* Implementation of the thread_handle target method.  Delegates to
   thread_db_thread_handle; only available when built with
   libthread_db support.  */

bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  return thread_db_thread_handle (ptid, handle, handle_len);
}
#endif
7123
276d4552
YQ
7124/* Default implementation of linux_target_ops method "set_pc" for
7125 32-bit pc register which is literally named "pc". */
7126
7127void
7128linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7129{
7130 uint32_t newpc = pc;
7131
7132 supply_register_by_name (regcache, "pc", &newpc);
7133}
7134
7135/* Default implementation of linux_target_ops method "get_pc" for
7136 32-bit pc register which is literally named "pc". */
7137
7138CORE_ADDR
7139linux_get_pc_32bit (struct regcache *regcache)
7140{
7141 uint32_t pc;
7142
7143 collect_register_by_name (regcache, "pc", &pc);
7144 if (debug_threads)
7145 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7146 return pc;
7147}
7148
6f69e520
YQ
7149/* Default implementation of linux_target_ops method "set_pc" for
7150 64-bit pc register which is literally named "pc". */
7151
7152void
7153linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7154{
7155 uint64_t newpc = pc;
7156
7157 supply_register_by_name (regcache, "pc", &newpc);
7158}
7159
7160/* Default implementation of linux_target_ops method "get_pc" for
7161 64-bit pc register which is literally named "pc". */
7162
7163CORE_ADDR
7164linux_get_pc_64bit (struct regcache *regcache)
7165{
7166 uint64_t pc;
7167
7168 collect_register_by_name (regcache, "pc", &pc);
7169 if (debug_threads)
7170 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7171 return pc;
7172}
7173
0570503d 7174/* See linux-low.h. */
974c89e0 7175
0570503d
PFC
7176int
7177linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
974c89e0
AH
7178{
7179 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7180 int offset = 0;
7181
7182 gdb_assert (wordsize == 4 || wordsize == 8);
7183
52405d85 7184 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
974c89e0
AH
7185 {
7186 if (wordsize == 4)
7187 {
0570503d 7188 uint32_t *data_p = (uint32_t *) data;
974c89e0 7189 if (data_p[0] == match)
0570503d
PFC
7190 {
7191 *valp = data_p[1];
7192 return 1;
7193 }
974c89e0
AH
7194 }
7195 else
7196 {
0570503d 7197 uint64_t *data_p = (uint64_t *) data;
974c89e0 7198 if (data_p[0] == match)
0570503d
PFC
7199 {
7200 *valp = data_p[1];
7201 return 1;
7202 }
974c89e0
AH
7203 }
7204
7205 offset += 2 * wordsize;
7206 }
7207
7208 return 0;
7209}
7210
7211/* See linux-low.h. */
7212
7213CORE_ADDR
7214linux_get_hwcap (int wordsize)
7215{
0570503d
PFC
7216 CORE_ADDR hwcap = 0;
7217 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7218 return hwcap;
974c89e0
AH
7219}
7220
7221/* See linux-low.h. */
7222
7223CORE_ADDR
7224linux_get_hwcap2 (int wordsize)
7225{
0570503d
PFC
7226 CORE_ADDR hwcap2 = 0;
7227 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7228 return hwcap2;
974c89e0 7229}
6f69e520 7230
3aee8918
PA
7231#ifdef HAVE_LINUX_REGSETS
7232void
7233initialize_regsets_info (struct regsets_info *info)
7234{
7235 for (info->num_regsets = 0;
7236 info->regsets[info->num_regsets].size >= 0;
7237 info->num_regsets++)
7238 ;
3aee8918
PA
7239}
7240#endif
7241
da6d8c04
DJ
/* One-time gdbserver initialization for the Linux target: install
   the target vector, emit any ptrace/proc warnings, install the
   SIGCHLD handler, run per-architecture setup, and probe available
   ptrace features.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (the_linux_target);

  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();

  /* SA_RESTART so interrupted syscalls are restarted when a child
     status change delivers SIGCHLD.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  linux_check_ptrace_features ();
}