/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2018 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "infrun.h"
#include "target.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <unistd.h>
#include <sys/syscall.h>
#include "nat/gdb_ptrace.h"
#include "linux-nat.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-child.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/procfs.h>		/* for elf_gregset etc.  */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include <sys/stat.h>		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include "xml-support.h"
#include <sys/vfs.h>
#include "solib.h"
#include "nat/linux-osdata.h"
#include "linux-tdep.h"
#include "symfile.h"
#include "agent.h"
#include "tracepoint.h"
#include "buffer.h"
#include "target-descriptions.h"
#include "filestuff.h"
#include "objfiles.h"
#include "nat/linux-namespaces.h"
#include "fileio.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

/* This comment documents high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good:

- If the thread group leader exits while other threads in the thread
  group still exist, waitpid(TGID, ...) hangs.  That waitpid won't
  return an exit status until the other threads in the group are
  reaped.

- When a non-leader thread execs, that thread just vanishes without
  reporting an exit (so we'd hang if we waited for it explicitly in
  that case).  The exec event is instead reported to the TGID pid.

The solution is to always use -1 and WNOHANG, together with
sigsuspend.

First, we use non-blocking waitpid to check for events.  If nothing is
found, we use sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives,
it means something happened to a child process.  As soon as we know
there's an event, we get back to calling nonblocking waitpid.

Note that SIGCHLD should be blocked between waitpid and sigsuspend
calls, so that we don't miss a signal.  If SIGCHLD arrives in between,
when it's blocked, the signal becomes pending and sigsuspend
immediately notices it and returns.  (A minimal sketch of this loop
follows this comment.)

Waiting for events in async mode (TARGET_WNOHANG)
=================================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend is a
viable option.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as a waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well as
on other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop when there's a target
event, that's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered,
so that we can intercept it.  SIGSTOP's advantage is that it cannot be
blocked.  A disadvantage is that it is not a real-time signal, so it can only
be queued once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
use them, because they have special behavior when the signal is generated -
not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread we
tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.

Exec events
===========

The case of a thread group (process) with 3 or more threads, where a
thread other than the leader execs, is worth detailing:

On an exec, the Linux kernel destroys all threads except the execing
one in the thread group, and resets the execing thread's tid to the
tgid.  No exit notification is sent for the execing thread -- from the
ptracer's perspective, it appears as though the execing thread just
vanishes.  Until we reap all other threads except the leader and the
execing thread, the leader will be zombie, and the execing thread will
be in `D (disc sleep)' state.  As soon as all other threads are
reaped, the execing thread changes its tid to the tgid, and the
previous (zombie) leader vanishes, giving place to the "new"
leader.  */
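
/* An illustrative sketch, not code from this file: the sync-mode
   scheme described above reduces to a loop of non-blocking waitpid
   plus sigsuspend, entered with SIGCHLD blocked so that the signal
   can only be consumed atomically inside sigsuspend.  PREV_MASK plays
   the role suspend_mask plays later in this file; the other names are
   made up for the example, and the snippet assumes <signal.h>,
   <sys/wait.h> and <errno.h>.

     sigset_t chld_mask, prev_mask;

     sigemptyset (&chld_mask);
     sigaddset (&chld_mask, SIGCHLD);
     sigprocmask (SIG_BLOCK, &chld_mask, &prev_mask);

     for (;;)
       {
	 int status;
	 pid_t pid = waitpid (-1, &status, WNOHANG | __WALL);

	 if (pid > 0)
	   break;
	 if (pid == -1 && errno == ECHILD)
	   break;

	 sigsuspend (&prev_mask);
       }

   When waitpid returns a pid, an event is ready to be handled; when
   it returns zero, sigsuspend atomically unblocks SIGCHLD and sleeps
   until a SIGCHLD (possibly one already pending) arrives.  */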

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

struct linux_nat_target *linux_target;

/* Does the current host support PTRACE_GETREGSET?  */
enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (struct lwp_info *);

/* The method to call, if any, when a thread is destroyed.  */
static void (*linux_nat_delete_thread) (struct arch_lwp_info *);

/* The method to call, if any, when a new fork is attached.  */
static linux_nat_new_fork_ftype *linux_nat_new_fork;

/* The method to call, if any, when a process is no longer
   attached.  */
static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;

/* Hook to call prior to resuming a thread.  */
static void (*linux_nat_prepare_to_resume) (struct lwp_info *);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (siginfo_t *,
				       gdb_byte *,
				       int);

/* The saved to_close method, inherited from inf-ptrace.c.
   Called by our to_close.  */
static void (*super_close) (struct target_ops *);

static unsigned int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Whether target_thread_events is in effect.  */
static int report_thread_events;

/* Async mode support.  */

/* The read/write ends of the pipe registered as a waitable file in
   the event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define linux_is_async_p() (linux_nat_event_pipe[0] != -1)

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in the
   event pipe, so that the select/poll in the event loop realizes we
   have something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long as we
     end up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}
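
/* An illustrative sketch, not code from this file: the async-mode
   "self-pipe trick" described at the top of the file ties the two
   helpers above to a SIGCHLD handler.  The handler (the real one
   lives elsewhere in this file) only needs to mark the event pipe;
   the event loop, which select/poll's on linux_nat_event_pipe[0],
   then wakes up and calls back into linux-nat.c to do the waitpid
   work outside of signal context.  The handler name below is made up
   for the example; saving and restoring errno matters because the
   handler can interrupt arbitrary code and write may clobber it.

     static void
     example_sigchld_handler (int signo)
     {
       int old_errno = errno;

       if (linux_is_async_p ())
	 async_file_mark ();

       errno = old_errno;
     }
*/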

static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp, void *data);
static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static void delete_lwp (ptid_t ptid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);

static int lwp_status_pending_p (struct lwp_info *lp);

static int sigtrap_is_event (int status);
static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;

static void save_stop_reason (struct lwp_info *lp);

\f
/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return lwp->ptid;
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->step;
}

\f
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

/* Return the ptrace options that we want to try to enable.  */

static int
linux_nat_ptrace_options (int attached)
{
  int options = 0;

  if (!attached)
    options |= PTRACE_O_EXITKILL;

  options |= (PTRACE_O_TRACESYSGOOD
	      | PTRACE_O_TRACEVFORKDONE
	      | PTRACE_O_TRACEVFORK
	      | PTRACE_O_TRACEFORK
	      | PTRACE_O_TRACEEXEC);

  return options;
}

/* Initialize ptrace warnings and check for supported ptrace
   features given PID.

   ATTACHED should be nonzero iff we attached to the inferior.  */

static void
linux_init_ptrace (pid_t pid, int attached)
{
  int options = linux_nat_ptrace_options (attached);

  linux_enable_event_reporting (pid, options);
  linux_ptrace_init_warnings ();
}

linux_nat_target::~linux_nat_target ()
{}

void
linux_nat_target::post_attach (int pid)
{
  linux_init_ptrace (pid, 1);
}

void
linux_nat_target::post_startup_inferior (ptid_t ptid)
{
  linux_init_ptrace (ptid_get_pid (ptid), 0);
}
441
4403d8e9
JK
442/* Return the number of known LWPs in the tgid given by PID. */
443
444static int
445num_lwps (int pid)
446{
447 int count = 0;
448 struct lwp_info *lp;
449
450 for (lp = lwp_list; lp; lp = lp->next)
451 if (ptid_get_pid (lp->ptid) == pid)
452 count++;
453
454 return count;
455}
456
457/* Call delete_lwp with prototype compatible for make_cleanup. */
458
459static void
460delete_lwp_cleanup (void *lp_voidp)
461{
9a3c8263 462 struct lwp_info *lp = (struct lwp_info *) lp_voidp;
4403d8e9
JK
463
464 delete_lwp (lp->ptid);
465}
466
d83ad864
DB
467/* Target hook for follow_fork. On entry inferior_ptid must be the
468 ptid of the followed inferior. At return, inferior_ptid will be
469 unchanged. */
470
f6ac5f3d
PA
471int
472linux_nat_target::follow_fork (int follow_child, int detach_fork)
3993f6b1 473{
d83ad864 474 if (!follow_child)
4de4c07c 475 {
6c95b8df 476 struct lwp_info *child_lp = NULL;
d83ad864 477 int status = W_STOPCODE (0);
d83ad864 478 int has_vforked;
79639e11 479 ptid_t parent_ptid, child_ptid;
d83ad864
DB
480 int parent_pid, child_pid;
481
482 has_vforked = (inferior_thread ()->pending_follow.kind
483 == TARGET_WAITKIND_VFORKED);
79639e11
PA
484 parent_ptid = inferior_ptid;
485 child_ptid = inferior_thread ()->pending_follow.value.related_pid;
486 parent_pid = ptid_get_lwp (parent_ptid);
487 child_pid = ptid_get_lwp (child_ptid);
4de4c07c 488
1777feb0 489 /* We're already attached to the parent, by default. */
2989a365 490 child_lp = add_lwp (child_ptid);
d83ad864
DB
491 child_lp->stopped = 1;
492 child_lp->last_resume_kind = resume_stop;
4de4c07c 493
ac264b3b
MS
494 /* Detach new forked process? */
495 if (detach_fork)
f75c00e4 496 {
2989a365
TT
497 struct cleanup *old_chain = make_cleanup (delete_lwp_cleanup,
498 child_lp);
4403d8e9 499
4403d8e9
JK
500 if (linux_nat_prepare_to_resume != NULL)
501 linux_nat_prepare_to_resume (child_lp);
c077881a
HZ
502
503 /* When debugging an inferior in an architecture that supports
504 hardware single stepping on a kernel without commit
505 6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
506 process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
507 set if the parent process had them set.
508 To work around this, single step the child process
509 once before detaching to clear the flags. */
510
2fd9d7ca
PA
511 /* Note that we consult the parent's architecture instead of
512 the child's because there's no inferior for the child at
513 this point. */
c077881a 514 if (!gdbarch_software_single_step_p (target_thread_architecture
2fd9d7ca 515 (parent_ptid)))
c077881a 516 {
c077881a
HZ
517 linux_disable_event_reporting (child_pid);
518 if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
519 perror_with_name (_("Couldn't do single step"));
520 if (my_waitpid (child_pid, &status, 0) < 0)
521 perror_with_name (_("Couldn't wait vfork process"));
522 }
523
524 if (WIFSTOPPED (status))
9caaaa83
PA
525 {
526 int signo;
527
528 signo = WSTOPSIG (status);
529 if (signo != 0
530 && !signal_pass_state (gdb_signal_from_host (signo)))
531 signo = 0;
532 ptrace (PTRACE_DETACH, child_pid, 0, signo);
533 }
4403d8e9
JK
534
535 do_cleanups (old_chain);
ac264b3b
MS
536 }
537 else
538 {
2989a365
TT
539 scoped_restore save_inferior_ptid
540 = make_scoped_restore (&inferior_ptid);
541 inferior_ptid = child_ptid;
542
6c95b8df 543 /* Let the thread_db layer learn about this new process. */
2277426b 544 check_for_thread_db ();
ac264b3b 545 }
9016a515
DJ
546
547 if (has_vforked)
548 {
3ced3da4 549 struct lwp_info *parent_lp;
6c95b8df 550
79639e11 551 parent_lp = find_lwp_pid (parent_ptid);
96d7229d 552 gdb_assert (linux_supports_tracefork () >= 0);
3ced3da4 553
96d7229d 554 if (linux_supports_tracevforkdone ())
9016a515 555 {
6c95b8df
PA
556 if (debug_linux_nat)
557 fprintf_unfiltered (gdb_stdlog,
558 "LCFF: waiting for VFORK_DONE on %d\n",
559 parent_pid);
3ced3da4 560 parent_lp->stopped = 1;
9016a515 561
6c95b8df
PA
562 /* We'll handle the VFORK_DONE event like any other
563 event, in target_wait. */
9016a515
DJ
564 }
565 else
566 {
567 /* We can't insert breakpoints until the child has
568 finished with the shared memory region. We need to
569 wait until that happens. Ideal would be to just
570 call:
571 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
572 - waitpid (parent_pid, &status, __WALL);
573 However, most architectures can't handle a syscall
574 being traced on the way out if it wasn't traced on
575 the way in.
576
577 We might also think to loop, continuing the child
578 until it exits or gets a SIGTRAP. One problem is
579 that the child might call ptrace with PTRACE_TRACEME.
580
581 There's no simple and reliable way to figure out when
582 the vforked child will be done with its copy of the
583 shared memory. We could step it out of the syscall,
584 two instructions, let it go, and then single-step the
585 parent once. When we have hardware single-step, this
586 would work; with software single-step it could still
587 be made to work but we'd have to be able to insert
588 single-step breakpoints in the child, and we'd have
589 to insert -just- the single-step breakpoint in the
590 parent. Very awkward.
591
592 In the end, the best we can do is to make sure it
593 runs for a little while. Hopefully it will be out of
594 range of any breakpoints we reinsert. Usually this
595 is only the single-step breakpoint at vfork's return
596 point. */
597
6c95b8df
PA
598 if (debug_linux_nat)
599 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
600 "LCFF: no VFORK_DONE "
601 "support, sleeping a bit\n");
6c95b8df 602
9016a515 603 usleep (10000);
9016a515 604
6c95b8df
PA
605 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
606 and leave it pending. The next linux_nat_resume call
607 will notice a pending event, and bypasses actually
608 resuming the inferior. */
3ced3da4
PA
609 parent_lp->status = 0;
610 parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
611 parent_lp->stopped = 1;
6c95b8df
PA
612
613 /* If we're in async mode, need to tell the event loop
614 there's something here to process. */
d9d41e78 615 if (target_is_async_p ())
6c95b8df
PA
616 async_file_mark ();
617 }
9016a515 618 }
4de4c07c 619 }
3993f6b1 620 else
4de4c07c 621 {
3ced3da4 622 struct lwp_info *child_lp;
4de4c07c 623
3ced3da4
PA
624 child_lp = add_lwp (inferior_ptid);
625 child_lp->stopped = 1;
25289eb2 626 child_lp->last_resume_kind = resume_stop;
6c95b8df 627
6c95b8df 628 /* Let the thread_db layer learn about this new process. */
ef29ce1a 629 check_for_thread_db ();
4de4c07c
DJ
630 }
631
632 return 0;
633}
634
4de4c07c 635\f
f6ac5f3d
PA
636int
637linux_nat_target::insert_fork_catchpoint (int pid)
4de4c07c 638{
96d7229d 639 return !linux_supports_tracefork ();
3993f6b1
DJ
640}
641
f6ac5f3d
PA
642int
643linux_nat_target::remove_fork_catchpoint (int pid)
eb73ad13
PA
644{
645 return 0;
646}
647
f6ac5f3d
PA
648int
649linux_nat_target::insert_vfork_catchpoint (int pid)
3993f6b1 650{
96d7229d 651 return !linux_supports_tracefork ();
3993f6b1
DJ
652}
653
f6ac5f3d
PA
654int
655linux_nat_target::remove_vfork_catchpoint (int pid)
eb73ad13
PA
656{
657 return 0;
658}
659
f6ac5f3d
PA
660int
661linux_nat_target::insert_exec_catchpoint (int pid)
3993f6b1 662{
96d7229d 663 return !linux_supports_tracefork ();
3993f6b1
DJ
664}
665
f6ac5f3d
PA
666int
667linux_nat_target::remove_exec_catchpoint (int pid)
eb73ad13
PA
668{
669 return 0;
670}
671
f6ac5f3d
PA
672int
673linux_nat_target::set_syscall_catchpoint (int pid, bool needed, int any_count,
674 gdb::array_view<const int> syscall_counts)
a96d9b2e 675{
96d7229d 676 if (!linux_supports_tracesysgood ())
77b06cd7
TJB
677 return 1;
678
a96d9b2e
SDJ
679 /* On GNU/Linux, we ignore the arguments. It means that we only
680 enable the syscall catchpoints, but do not disable them.
77b06cd7 681
649a140c 682 Also, we do not use the `syscall_counts' information because we do not
a96d9b2e
SDJ
683 filter system calls here. We let GDB do the logic for us. */
684 return 0;
685}
686
774113b0
PA
687/* List of known LWPs, keyed by LWP PID. This speeds up the common
688 case of mapping a PID returned from the kernel to our corresponding
689 lwp_info data structure. */
690static htab_t lwp_lwpid_htab;
691
692/* Calculate a hash from a lwp_info's LWP PID. */
693
694static hashval_t
695lwp_info_hash (const void *ap)
696{
697 const struct lwp_info *lp = (struct lwp_info *) ap;
698 pid_t pid = ptid_get_lwp (lp->ptid);
699
700 return iterative_hash_object (pid, 0);
701}
702
703/* Equality function for the lwp_info hash table. Compares the LWP's
704 PID. */
705
706static int
707lwp_lwpid_htab_eq (const void *a, const void *b)
708{
709 const struct lwp_info *entry = (const struct lwp_info *) a;
710 const struct lwp_info *element = (const struct lwp_info *) b;
711
712 return ptid_get_lwp (entry->ptid) == ptid_get_lwp (element->ptid);
713}
714
715/* Create the lwp_lwpid_htab hash table. */
716
717static void
718lwp_lwpid_htab_create (void)
719{
720 lwp_lwpid_htab = htab_create (100, lwp_info_hash, lwp_lwpid_htab_eq, NULL);
721}
722
723/* Add LP to the hash table. */
724
725static void
726lwp_lwpid_htab_add_lwp (struct lwp_info *lp)
727{
728 void **slot;
729
730 slot = htab_find_slot (lwp_lwpid_htab, lp, INSERT);
731 gdb_assert (slot != NULL && *slot == NULL);
732 *slot = lp;
733}
734
735/* Head of doubly-linked list of known LWPs. Sorted by reverse
736 creation order. This order is assumed in some cases. E.g.,
737 reaping status after killing all LWPs of a process: the leader LWP
738 must be reaped last. */
9f0bdab8 739struct lwp_info *lwp_list;
774113b0
PA
740
741/* Add LP to sorted-by-reverse-creation-order doubly-linked list. */
742
743static void
744lwp_list_add (struct lwp_info *lp)
745{
746 lp->next = lwp_list;
747 if (lwp_list != NULL)
748 lwp_list->prev = lp;
749 lwp_list = lp;
750}
751
752/* Remove LP from sorted-by-reverse-creation-order doubly-linked
753 list. */
754
755static void
756lwp_list_remove (struct lwp_info *lp)
757{
758 /* Remove from sorted-by-creation-order list. */
759 if (lp->next != NULL)
760 lp->next->prev = lp->prev;
761 if (lp->prev != NULL)
762 lp->prev->next = lp->next;
763 if (lp == lwp_list)
764 lwp_list = lp->next;
765}
766
d6b0e80f
AC
767\f
768
d6b0e80f
AC
769/* Original signal mask. */
770static sigset_t normal_mask;
771
772/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
773 _initialize_linux_nat. */
774static sigset_t suspend_mask;
775
7feb7d06
PA
776/* Signals to block to make that sigsuspend work. */
777static sigset_t blocked_mask;
778
779/* SIGCHLD action. */
780struct sigaction sigchld_action;
b84876c2 781
7feb7d06
PA
782/* Block child signals (SIGCHLD and linux threads signals), and store
783 the previous mask in PREV_MASK. */
84e46146 784
7feb7d06
PA
785static void
786block_child_signals (sigset_t *prev_mask)
787{
788 /* Make sure SIGCHLD is blocked. */
789 if (!sigismember (&blocked_mask, SIGCHLD))
790 sigaddset (&blocked_mask, SIGCHLD);
791
792 sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
793}
794
795/* Restore child signals mask, previously returned by
796 block_child_signals. */
797
798static void
799restore_child_signals_mask (sigset_t *prev_mask)
800{
801 sigprocmask (SIG_SETMASK, prev_mask, NULL);
802}
2455069d
UW
803
804/* Mask of signals to pass directly to the inferior. */
805static sigset_t pass_mask;
806
807/* Update signals to pass to the inferior. */
f6ac5f3d
PA
808void
809linux_nat_target::pass_signals (int numsigs, unsigned char *pass_signals)
2455069d
UW
810{
811 int signo;
812
813 sigemptyset (&pass_mask);
814
815 for (signo = 1; signo < NSIG; signo++)
816 {
2ea28649 817 int target_signo = gdb_signal_from_host (signo);
2455069d
UW
818 if (target_signo < numsigs && pass_signals[target_signo])
819 sigaddset (&pass_mask, signo);
820 }
821}
822
d6b0e80f
AC
823\f
824
825/* Prototypes for local functions. */
826static int stop_wait_callback (struct lwp_info *lp, void *data);
20ba1ce6 827static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);
ced2dffb 828static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
710151dd 829
d6b0e80f 830\f
d6b0e80f 831
7b50312a
PA
832/* Destroy and free LP. */
833
834static void
835lwp_free (struct lwp_info *lp)
836{
466eecee
SM
837 /* Let the arch specific bits release arch_lwp_info. */
838 if (linux_nat_delete_thread != NULL)
839 linux_nat_delete_thread (lp->arch_private);
840 else
841 gdb_assert (lp->arch_private == NULL);
842
7b50312a
PA
843 xfree (lp);
844}
845
774113b0 846/* Traversal function for purge_lwp_list. */
d90e17a7 847
774113b0
PA
848static int
849lwp_lwpid_htab_remove_pid (void **slot, void *info)
d90e17a7 850{
774113b0
PA
851 struct lwp_info *lp = (struct lwp_info *) *slot;
852 int pid = *(int *) info;
d90e17a7 853
774113b0 854 if (ptid_get_pid (lp->ptid) == pid)
d90e17a7 855 {
774113b0
PA
856 htab_clear_slot (lwp_lwpid_htab, slot);
857 lwp_list_remove (lp);
858 lwp_free (lp);
859 }
d90e17a7 860
774113b0
PA
861 return 1;
862}
d90e17a7 863
774113b0
PA
864/* Remove all LWPs belonging to PID from the lwp list. */
865
866static void
867purge_lwp_list (int pid)
868{
869 htab_traverse_noresize (lwp_lwpid_htab, lwp_lwpid_htab_remove_pid, &pid);
d90e17a7
PA
870}
871
26cb8b7c
PA
872/* Add the LWP specified by PTID to the list. PTID is the first LWP
873 in the process. Return a pointer to the structure describing the
874 new LWP.
875
876 This differs from add_lwp in that we don't let the arch specific
877 bits know about this new thread. Current clients of this callback
878 take the opportunity to install watchpoints in the new thread, and
879 we shouldn't do that for the first thread. If we're spawning a
880 child ("run"), the thread executes the shell wrapper first, and we
881 shouldn't touch it until it execs the program we want to debug.
882 For "attach", it'd be okay to call the callback, but it's not
883 necessary, because watchpoints can't yet have been inserted into
884 the inferior. */
d6b0e80f
AC
885
886static struct lwp_info *
26cb8b7c 887add_initial_lwp (ptid_t ptid)
d6b0e80f
AC
888{
889 struct lwp_info *lp;
890
dfd4cc63 891 gdb_assert (ptid_lwp_p (ptid));
d6b0e80f 892
8d749320 893 lp = XNEW (struct lwp_info);
d6b0e80f
AC
894
895 memset (lp, 0, sizeof (struct lwp_info));
896
25289eb2 897 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
898 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
899
900 lp->ptid = ptid;
dc146f7c 901 lp->core = -1;
d6b0e80f 902
774113b0
PA
903 /* Add to sorted-by-reverse-creation-order list. */
904 lwp_list_add (lp);
905
906 /* Add to keyed-by-pid htab. */
907 lwp_lwpid_htab_add_lwp (lp);
d6b0e80f 908
26cb8b7c
PA
909 return lp;
910}
911
912/* Add the LWP specified by PID to the list. Return a pointer to the
913 structure describing the new LWP. The LWP should already be
914 stopped. */
915
916static struct lwp_info *
917add_lwp (ptid_t ptid)
918{
919 struct lwp_info *lp;
920
921 lp = add_initial_lwp (ptid);
922
6e012a6c
PA
923 /* Let the arch specific bits know about this new thread. Current
924 clients of this callback take the opportunity to install
26cb8b7c
PA
925 watchpoints in the new thread. We don't do this for the first
926 thread though. See add_initial_lwp. */
927 if (linux_nat_new_thread != NULL)
7b50312a 928 linux_nat_new_thread (lp);
9f0bdab8 929
d6b0e80f
AC
930 return lp;
931}
932
933/* Remove the LWP specified by PID from the list. */
934
935static void
936delete_lwp (ptid_t ptid)
937{
774113b0
PA
938 struct lwp_info *lp;
939 void **slot;
940 struct lwp_info dummy;
d6b0e80f 941
774113b0
PA
942 dummy.ptid = ptid;
943 slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
944 if (slot == NULL)
945 return;
d6b0e80f 946
774113b0
PA
947 lp = *(struct lwp_info **) slot;
948 gdb_assert (lp != NULL);
d6b0e80f 949
774113b0 950 htab_clear_slot (lwp_lwpid_htab, slot);
d6b0e80f 951
774113b0
PA
952 /* Remove from sorted-by-creation-order list. */
953 lwp_list_remove (lp);
d6b0e80f 954
774113b0 955 /* Release. */
7b50312a 956 lwp_free (lp);
d6b0e80f
AC
957}
958
959/* Return a pointer to the structure describing the LWP corresponding
960 to PID. If no corresponding LWP could be found, return NULL. */
961
962static struct lwp_info *
963find_lwp_pid (ptid_t ptid)
964{
965 struct lwp_info *lp;
966 int lwp;
774113b0 967 struct lwp_info dummy;
d6b0e80f 968
dfd4cc63
LM
969 if (ptid_lwp_p (ptid))
970 lwp = ptid_get_lwp (ptid);
d6b0e80f 971 else
dfd4cc63 972 lwp = ptid_get_pid (ptid);
d6b0e80f 973
774113b0
PA
974 dummy.ptid = ptid_build (0, lwp, 0);
975 lp = (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
976 return lp;
d6b0e80f
AC
977}
978
6d4ee8c6 979/* See nat/linux-nat.h. */
d6b0e80f
AC
980
981struct lwp_info *
d90e17a7 982iterate_over_lwps (ptid_t filter,
6d4ee8c6 983 iterate_over_lwps_ftype callback,
d90e17a7 984 void *data)
d6b0e80f
AC
985{
986 struct lwp_info *lp, *lpnext;
987
988 for (lp = lwp_list; lp; lp = lpnext)
989 {
990 lpnext = lp->next;
d90e17a7
PA
991
992 if (ptid_match (lp->ptid, filter))
993 {
6d4ee8c6 994 if ((*callback) (lp, data) != 0)
d90e17a7
PA
995 return lp;
996 }
d6b0e80f
AC
997 }
998
999 return NULL;
1000}
1001
2277426b
PA
1002/* Update our internal state when changing from one checkpoint to
1003 another indicated by NEW_PTID. We can only switch single-threaded
1004 applications, so we only create one new LWP, and the previous list
1005 is discarded. */
f973ed9c
DJ
1006
1007void
1008linux_nat_switch_fork (ptid_t new_ptid)
1009{
1010 struct lwp_info *lp;
1011
dfd4cc63 1012 purge_lwp_list (ptid_get_pid (inferior_ptid));
2277426b 1013
f973ed9c
DJ
1014 lp = add_lwp (new_ptid);
1015 lp->stopped = 1;
e26af52f 1016
2277426b
PA
1017 /* This changes the thread's ptid while preserving the gdb thread
1018 num. Also changes the inferior pid, while preserving the
1019 inferior num. */
1020 thread_change_ptid (inferior_ptid, new_ptid);
1021
1022 /* We've just told GDB core that the thread changed target id, but,
1023 in fact, it really is a different thread, with different register
1024 contents. */
1025 registers_changed ();
e26af52f
DJ
1026}
1027
e26af52f
DJ
1028/* Handle the exit of a single thread LP. */
1029
1030static void
1031exit_lwp (struct lwp_info *lp)
1032{
e09875d4 1033 struct thread_info *th = find_thread_ptid (lp->ptid);
063bfe2e
VP
1034
1035 if (th)
e26af52f 1036 {
17faa917
DJ
1037 if (print_thread_events)
1038 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1039
4f8d22e3 1040 delete_thread (lp->ptid);
e26af52f
DJ
1041 }
1042
1043 delete_lwp (lp->ptid);
1044}
1045
a0ef4274
DJ
1046/* Wait for the LWP specified by LP, which we have just attached to.
1047 Returns a wait status for that LWP, to cache. */
1048
1049static int
22827c51 1050linux_nat_post_attach_wait (ptid_t ptid, int *signalled)
a0ef4274 1051{
dfd4cc63 1052 pid_t new_pid, pid = ptid_get_lwp (ptid);
a0ef4274
DJ
1053 int status;
1054
644cebc9 1055 if (linux_proc_pid_is_stopped (pid))
a0ef4274
DJ
1056 {
1057 if (debug_linux_nat)
1058 fprintf_unfiltered (gdb_stdlog,
1059 "LNPAW: Attaching to a stopped process\n");
1060
1061 /* The process is definitely stopped. It is in a job control
1062 stop, unless the kernel predates the TASK_STOPPED /
1063 TASK_TRACED distinction, in which case it might be in a
1064 ptrace stop. Make sure it is in a ptrace stop; from there we
1065 can kill it, signal it, et cetera.
1066
1067 First make sure there is a pending SIGSTOP. Since we are
1068 already attached, the process can not transition from stopped
1069 to running without a PTRACE_CONT; so we know this signal will
1070 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1071 probably already in the queue (unless this kernel is old
1072 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1073 is not an RT signal, it can only be queued once. */
1074 kill_lwp (pid, SIGSTOP);
1075
1076 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1077 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1078 ptrace (PTRACE_CONT, pid, 0, 0);
1079 }
1080
1081 /* Make sure the initial process is stopped. The user-level threads
1082 layer might want to poke around in the inferior, and that won't
1083 work if things haven't stabilized yet. */
4a6ed09b 1084 new_pid = my_waitpid (pid, &status, __WALL);
dacc9cb2
PP
1085 gdb_assert (pid == new_pid);
1086
1087 if (!WIFSTOPPED (status))
1088 {
1089 /* The pid we tried to attach has apparently just exited. */
1090 if (debug_linux_nat)
1091 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
1092 pid, status_to_str (status));
1093 return status;
1094 }
a0ef4274
DJ
1095
1096 if (WSTOPSIG (status) != SIGSTOP)
1097 {
1098 *signalled = 1;
1099 if (debug_linux_nat)
1100 fprintf_unfiltered (gdb_stdlog,
1101 "LNPAW: Received %s after attaching\n",
1102 status_to_str (status));
1103 }
1104
1105 return status;
1106}
1107
f6ac5f3d
PA
1108void
1109linux_nat_target::create_inferior (const char *exec_file,
1110 const std::string &allargs,
1111 char **env, int from_tty)
b84876c2 1112{
41272101
TT
1113 maybe_disable_address_space_randomization restore_personality
1114 (disable_randomization);
b84876c2
PA
1115
1116 /* The fork_child mechanism is synchronous and calls target_wait, so
1117 we have to mask the async mode. */
1118
2455069d 1119 /* Make sure we report all signals during startup. */
f6ac5f3d 1120 pass_signals (0, NULL);
2455069d 1121
f6ac5f3d 1122 inf_ptrace_target::create_inferior (exec_file, allargs, env, from_tty);
b84876c2
PA
1123}
1124
8784d563
PA
1125/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1126 already attached. Returns true if a new LWP is found, false
1127 otherwise. */
1128
1129static int
1130attach_proc_task_lwp_callback (ptid_t ptid)
1131{
1132 struct lwp_info *lp;
1133
1134 /* Ignore LWPs we're already attached to. */
1135 lp = find_lwp_pid (ptid);
1136 if (lp == NULL)
1137 {
1138 int lwpid = ptid_get_lwp (ptid);
1139
1140 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1141 {
1142 int err = errno;
1143
1144 /* Be quiet if we simply raced with the thread exiting.
1145 EPERM is returned if the thread's task still exists, and
1146 is marked as exited or zombie, as well as other
1147 conditions, so in that case, confirm the status in
1148 /proc/PID/status. */
1149 if (err == ESRCH
1150 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1151 {
1152 if (debug_linux_nat)
1153 {
1154 fprintf_unfiltered (gdb_stdlog,
1155 "Cannot attach to lwp %d: "
1156 "thread is gone (%d: %s)\n",
1157 lwpid, err, safe_strerror (err));
1158 }
1159 }
1160 else
1161 {
4d9b86e1
SM
1162 std::string reason
1163 = linux_ptrace_attach_fail_reason_string (ptid, err);
1164
f71f0b0d 1165 warning (_("Cannot attach to lwp %d: %s"),
4d9b86e1 1166 lwpid, reason.c_str ());
8784d563
PA
1167 }
1168 }
1169 else
1170 {
1171 if (debug_linux_nat)
1172 fprintf_unfiltered (gdb_stdlog,
1173 "PTRACE_ATTACH %s, 0, 0 (OK)\n",
1174 target_pid_to_str (ptid));
1175
1176 lp = add_lwp (ptid);
8784d563
PA
1177
1178 /* The next time we wait for this LWP we'll see a SIGSTOP as
1179 PTRACE_ATTACH brings it to a halt. */
1180 lp->signalled = 1;
1181
1182 /* We need to wait for a stop before being able to make the
1183 next ptrace call on this LWP. */
1184 lp->must_set_ptrace_flags = 1;
026a9174
PA
1185
1186 /* So that wait collects the SIGSTOP. */
1187 lp->resumed = 1;
1188
1189 /* Also add the LWP to gdb's thread list, in case a
1190 matching libthread_db is not found (or the process uses
1191 raw clone). */
1192 add_thread (lp->ptid);
1193 set_running (lp->ptid, 1);
1194 set_executing (lp->ptid, 1);
8784d563
PA
1195 }
1196
1197 return 1;
1198 }
1199 return 0;
1200}
1201
f6ac5f3d
PA
1202void
1203linux_nat_target::attach (const char *args, int from_tty)
d6b0e80f
AC
1204{
1205 struct lwp_info *lp;
d6b0e80f 1206 int status;
af990527 1207 ptid_t ptid;
d6b0e80f 1208
2455069d 1209 /* Make sure we report all signals during attach. */
f6ac5f3d 1210 pass_signals (0, NULL);
2455069d 1211
492d29ea 1212 TRY
87b0bb13 1213 {
f6ac5f3d 1214 inf_ptrace_target::attach (args, from_tty);
87b0bb13 1215 }
492d29ea 1216 CATCH (ex, RETURN_MASK_ERROR)
87b0bb13
JK
1217 {
1218 pid_t pid = parse_pid_to_attach (args);
4d9b86e1 1219 std::string reason = linux_ptrace_attach_fail_reason (pid);
87b0bb13 1220
4d9b86e1
SM
1221 if (!reason.empty ())
1222 throw_error (ex.error, "warning: %s\n%s", reason.c_str (), ex.message);
7ae1a6a6 1223 else
a7b2d0fb 1224 throw_error (ex.error, "%s", ex.message);
87b0bb13 1225 }
492d29ea 1226 END_CATCH
d6b0e80f 1227
af990527
PA
1228 /* The ptrace base target adds the main thread with (pid,0,0)
1229 format. Decorate it with lwp info. */
dfd4cc63
LM
1230 ptid = ptid_build (ptid_get_pid (inferior_ptid),
1231 ptid_get_pid (inferior_ptid),
1232 0);
af990527
PA
1233 thread_change_ptid (inferior_ptid, ptid);
1234
9f0bdab8 1235 /* Add the initial process as the first LWP to the list. */
26cb8b7c 1236 lp = add_initial_lwp (ptid);
a0ef4274 1237
22827c51 1238 status = linux_nat_post_attach_wait (lp->ptid, &lp->signalled);
dacc9cb2
PP
1239 if (!WIFSTOPPED (status))
1240 {
1241 if (WIFEXITED (status))
1242 {
1243 int exit_code = WEXITSTATUS (status);
1244
223ffa71 1245 target_terminal::ours ();
bc1e6c81 1246 target_mourn_inferior (inferior_ptid);
dacc9cb2
PP
1247 if (exit_code == 0)
1248 error (_("Unable to attach: program exited normally."));
1249 else
1250 error (_("Unable to attach: program exited with code %d."),
1251 exit_code);
1252 }
1253 else if (WIFSIGNALED (status))
1254 {
2ea28649 1255 enum gdb_signal signo;
dacc9cb2 1256
223ffa71 1257 target_terminal::ours ();
bc1e6c81 1258 target_mourn_inferior (inferior_ptid);
dacc9cb2 1259
2ea28649 1260 signo = gdb_signal_from_host (WTERMSIG (status));
dacc9cb2
PP
1261 error (_("Unable to attach: program terminated with signal "
1262 "%s, %s."),
2ea28649
PA
1263 gdb_signal_to_name (signo),
1264 gdb_signal_to_string (signo));
dacc9cb2
PP
1265 }
1266
1267 internal_error (__FILE__, __LINE__,
1268 _("unexpected status %d for PID %ld"),
dfd4cc63 1269 status, (long) ptid_get_lwp (ptid));
dacc9cb2
PP
1270 }
1271
a0ef4274 1272 lp->stopped = 1;
9f0bdab8 1273
a0ef4274 1274 /* Save the wait status to report later. */
d6b0e80f 1275 lp->resumed = 1;
a0ef4274
DJ
1276 if (debug_linux_nat)
1277 fprintf_unfiltered (gdb_stdlog,
1278 "LNA: waitpid %ld, saving status %s\n",
dfd4cc63 1279 (long) ptid_get_pid (lp->ptid), status_to_str (status));
710151dd 1280
7feb7d06
PA
1281 lp->status = status;
1282
8784d563
PA
1283 /* We must attach to every LWP. If /proc is mounted, use that to
1284 find them now. The inferior may be using raw clone instead of
1285 using pthreads. But even if it is using pthreads, thread_db
1286 walks structures in the inferior's address space to find the list
1287 of threads/LWPs, and those structures may well be corrupted.
1288 Note that once thread_db is loaded, we'll still use it to list
1289 threads and associate pthread info with each LWP. */
1290 linux_proc_attach_tgid_threads (ptid_get_pid (lp->ptid),
1291 attach_proc_task_lwp_callback);
1292
7feb7d06 1293 if (target_can_async_p ())
6a3753b3 1294 target_async (1);
d6b0e80f
AC
1295}
1296
ced2dffb
PA
1297/* Get pending signal of THREAD as a host signal number, for detaching
1298 purposes. This is the signal the thread last stopped for, which we
1299 need to deliver to the thread when detaching, otherwise, it'd be
1300 suppressed/lost. */
1301
a0ef4274 1302static int
ced2dffb 1303get_detach_signal (struct lwp_info *lp)
a0ef4274 1304{
a493e3e2 1305 enum gdb_signal signo = GDB_SIGNAL_0;
ca2163eb
PA
1306
1307 /* If we paused threads momentarily, we may have stored pending
1308 events in lp->status or lp->waitstatus (see stop_wait_callback),
1309 and GDB core hasn't seen any signal for those threads.
1310 Otherwise, the last signal reported to the core is found in the
1311 thread object's stop_signal.
1312
1313 There's a corner case that isn't handled here at present. Only
1314 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1315 stop_signal make sense as a real signal to pass to the inferior.
1316 Some catchpoint related events, like
1317 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
a493e3e2 1318 to GDB_SIGNAL_SIGTRAP when the catchpoint triggers. But,
ca2163eb
PA
1319 those traps are debug API (ptrace in our case) related and
1320 induced; the inferior wouldn't see them if it wasn't being
1321 traced. Hence, we should never pass them to the inferior, even
1322 when set to pass state. Since this corner case isn't handled by
1323 infrun.c when proceeding with a signal, for consistency, neither
1324 do we handle it here (or elsewhere in the file we check for
1325 signal pass state). Normally SIGTRAP isn't set to pass state, so
1326 this is really a corner case. */
1327
1328 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
a493e3e2 1329 signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
ca2163eb 1330 else if (lp->status)
2ea28649 1331 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
fbea99ea 1332 else if (target_is_non_stop_p () && !is_executing (lp->ptid))
ca2163eb
PA
1333 {
1334 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1335
72b049d3
PA
1336 if (tp->suspend.waitstatus_pending_p)
1337 signo = tp->suspend.waitstatus.value.sig;
1338 else
1339 signo = tp->suspend.stop_signal;
ca2163eb 1340 }
fbea99ea 1341 else if (!target_is_non_stop_p ())
a0ef4274 1342 {
ca2163eb
PA
1343 struct target_waitstatus last;
1344 ptid_t last_ptid;
4c28f408 1345
ca2163eb 1346 get_last_target_status (&last_ptid, &last);
4c28f408 1347
dfd4cc63 1348 if (ptid_get_lwp (lp->ptid) == ptid_get_lwp (last_ptid))
ca2163eb 1349 {
e09875d4 1350 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1351
16c381f0 1352 signo = tp->suspend.stop_signal;
4c28f408 1353 }
ca2163eb 1354 }
4c28f408 1355
a493e3e2 1356 if (signo == GDB_SIGNAL_0)
ca2163eb
PA
1357 {
1358 if (debug_linux_nat)
1359 fprintf_unfiltered (gdb_stdlog,
1360 "GPT: lwp %s has no pending signal\n",
1361 target_pid_to_str (lp->ptid));
1362 }
1363 else if (!signal_pass_state (signo))
1364 {
1365 if (debug_linux_nat)
3e43a32a
MS
1366 fprintf_unfiltered (gdb_stdlog,
1367 "GPT: lwp %s had signal %s, "
1368 "but it is in no pass state\n",
ca2163eb 1369 target_pid_to_str (lp->ptid),
2ea28649 1370 gdb_signal_to_string (signo));
a0ef4274 1371 }
a0ef4274 1372 else
4c28f408 1373 {
ca2163eb
PA
1374 if (debug_linux_nat)
1375 fprintf_unfiltered (gdb_stdlog,
1376 "GPT: lwp %s has pending signal %s\n",
1377 target_pid_to_str (lp->ptid),
2ea28649 1378 gdb_signal_to_string (signo));
ced2dffb
PA
1379
1380 return gdb_signal_to_host (signo);
4c28f408 1381 }
a0ef4274
DJ
1382
1383 return 0;
1384}
1385
ced2dffb
PA
1386/* Detach from LP. If SIGNO_P is non-NULL, then it points to the
1387 signal number that should be passed to the LWP when detaching.
1388 Otherwise pass any pending signal the LWP may have, if any. */
1389
1390static void
1391detach_one_lwp (struct lwp_info *lp, int *signo_p)
d6b0e80f 1392{
ced2dffb
PA
1393 int lwpid = ptid_get_lwp (lp->ptid);
1394 int signo;
1395
d6b0e80f
AC
1396 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1397
1398 if (debug_linux_nat && lp->status)
1399 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1400 strsignal (WSTOPSIG (lp->status)),
1401 target_pid_to_str (lp->ptid));
1402
a0ef4274
DJ
1403 /* If there is a pending SIGSTOP, get rid of it. */
1404 if (lp->signalled)
d6b0e80f 1405 {
d6b0e80f
AC
1406 if (debug_linux_nat)
1407 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1408 "DC: Sending SIGCONT to %s\n",
1409 target_pid_to_str (lp->ptid));
d6b0e80f 1410
ced2dffb 1411 kill_lwp (lwpid, SIGCONT);
d6b0e80f 1412 lp->signalled = 0;
d6b0e80f
AC
1413 }
1414
ced2dffb 1415 if (signo_p == NULL)
d6b0e80f 1416 {
a0ef4274 1417 /* Pass on any pending signal for this LWP. */
ced2dffb
PA
1418 signo = get_detach_signal (lp);
1419 }
1420 else
1421 signo = *signo_p;
a0ef4274 1422
ced2dffb
PA
1423 /* Preparing to resume may try to write registers, and fail if the
1424 lwp is zombie. If that happens, ignore the error. We'll handle
1425 it below, when detach fails with ESRCH. */
1426 TRY
1427 {
7b50312a
PA
1428 if (linux_nat_prepare_to_resume != NULL)
1429 linux_nat_prepare_to_resume (lp);
ced2dffb
PA
1430 }
1431 CATCH (ex, RETURN_MASK_ERROR)
1432 {
1433 if (!check_ptrace_stopped_lwp_gone (lp))
1434 throw_exception (ex);
1435 }
1436 END_CATCH
d6b0e80f 1437
ced2dffb
PA
1438 if (ptrace (PTRACE_DETACH, lwpid, 0, signo) < 0)
1439 {
1440 int save_errno = errno;
1441
1442 /* We know the thread exists, so ESRCH must mean the lwp is
1443 zombie. This can happen if one of the already-detached
1444 threads exits the whole thread group. In that case we're
1445 still attached, and must reap the lwp. */
1446 if (save_errno == ESRCH)
1447 {
1448 int ret, status;
d6b0e80f 1449
ced2dffb
PA
1450 ret = my_waitpid (lwpid, &status, __WALL);
1451 if (ret == -1)
1452 {
1453 warning (_("Couldn't reap LWP %d while detaching: %s"),
1454 lwpid, strerror (errno));
1455 }
1456 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1457 {
1458 warning (_("Reaping LWP %d while detaching "
1459 "returned unexpected status 0x%x"),
1460 lwpid, status);
1461 }
1462 }
1463 else
1464 {
1465 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1466 safe_strerror (save_errno));
1467 }
d6b0e80f 1468 }
ced2dffb
PA
1469 else if (debug_linux_nat)
1470 {
1471 fprintf_unfiltered (gdb_stdlog,
1472 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1473 target_pid_to_str (lp->ptid),
1474 strsignal (signo));
1475 }
1476
1477 delete_lwp (lp->ptid);
1478}
d6b0e80f 1479
ced2dffb
PA
1480static int
1481detach_callback (struct lwp_info *lp, void *data)
1482{
1483 /* We don't actually detach from the thread group leader just yet.
1484 If the thread group exits, we must reap the zombie clone lwps
1485 before we're able to reap the leader. */
1486 if (ptid_get_lwp (lp->ptid) != ptid_get_pid (lp->ptid))
1487 detach_one_lwp (lp, NULL);
d6b0e80f
AC
1488 return 0;
1489}
1490
f6ac5f3d
PA
1491void
1492linux_nat_target::detach (inferior *inf, int from_tty)
d6b0e80f 1493{
d90e17a7 1494 struct lwp_info *main_lwp;
bc09b0c1 1495 int pid = inf->pid;
a0ef4274 1496
ae5e0686
MK
1497 /* Don't unregister from the event loop, as there may be other
1498 inferiors running. */
b84876c2 1499
4c28f408
PA
1500 /* Stop all threads before detaching. ptrace requires that the
1501 thread is stopped to successfully detach. */
d90e17a7 1502 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
4c28f408
PA
1503 /* ... and wait until all of them have reported back that
1504 they're no longer running. */
d90e17a7 1505 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
4c28f408 1506
d90e17a7 1507 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
d6b0e80f
AC
1508
1509 /* Only the initial process should be left right now. */
bc09b0c1 1510 gdb_assert (num_lwps (pid) == 1);
d90e17a7
PA
1511
1512 main_lwp = find_lwp_pid (pid_to_ptid (pid));
d6b0e80f 1513
7a7d3353
PA
1514 if (forks_exist_p ())
1515 {
1516 /* Multi-fork case. The current inferior_ptid is being detached
1517 from, but there are other viable forks to debug. Detach from
1518 the current fork, and context-switch to the first
1519 available. */
6bd6f3b6 1520 linux_fork_detach (from_tty);
7a7d3353
PA
1521 }
1522 else
ced2dffb 1523 {
ced2dffb
PA
1524 target_announce_detach (from_tty);
1525
6bd6f3b6
SM
1526 /* Pass on any pending signal for the last LWP. */
1527 int signo = get_detach_signal (main_lwp);
ced2dffb
PA
1528
1529 detach_one_lwp (main_lwp, &signo);
1530
f6ac5f3d 1531 detach_success (inf);
ced2dffb 1532 }
d6b0e80f
AC
1533}
1534
8a99810d
PA
1535/* Resume execution of the inferior process. If STEP is nonzero,
1536 single-step it. If SIGNAL is nonzero, give it that signal. */
1537
1538static void
23f238d3
PA
1539linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
1540 enum gdb_signal signo)
8a99810d 1541{
8a99810d 1542 lp->step = step;
9c02b525
PA
1543
1544 /* stop_pc doubles as the PC the LWP had when it was last resumed.
1545 We only presently need that if the LWP is stepped though (to
1546 handle the case of stepping a breakpoint instruction). */
1547 if (step)
1548 {
1549 struct regcache *regcache = get_thread_regcache (lp->ptid);
1550
1551 lp->stop_pc = regcache_read_pc (regcache);
1552 }
1553 else
1554 lp->stop_pc = 0;
1555
8a99810d
PA
1556 if (linux_nat_prepare_to_resume != NULL)
1557 linux_nat_prepare_to_resume (lp);
f6ac5f3d 1558 linux_target->low_resume (lp->ptid, step, signo);
23f238d3
PA
1559
1560 /* Successfully resumed. Clear state that no longer makes sense,
1561 and mark the LWP as running. Must not do this before resuming
1562 otherwise if that fails other code will be confused. E.g., we'd
1563 later try to stop the LWP and hang forever waiting for a stop
1564 status. Note that we must not throw after this is cleared,
1565 otherwise handle_zombie_lwp_error would get confused. */
8a99810d 1566 lp->stopped = 0;
1ad3de98 1567 lp->core = -1;
23f238d3 1568 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
8a99810d
PA
1569 registers_changed_ptid (lp->ptid);
1570}
1571
23f238d3
PA
1572/* Called when we try to resume a stopped LWP and that errors out. If
1573 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
1574 or about to become), discard the error, clear any pending status
1575 the LWP may have, and return true (we'll collect the exit status
1576 soon enough). Otherwise, return false. */
1577
1578static int
1579check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
1580{
1581 /* If we get an error after resuming the LWP successfully, we'd
1582 confuse !T state for the LWP being gone. */
1583 gdb_assert (lp->stopped);
1584
1585 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
1586 because even if ptrace failed with ESRCH, the tracee may be "not
1587 yet fully dead", but already refusing ptrace requests. In that
1588 case the tracee has 'R (Running)' state for a little bit
1589 (observed in Linux 3.18). See also the note on ESRCH in the
1590 ptrace(2) man page. Instead, check whether the LWP has any state
1591 other than ptrace-stopped. */
1592
1593 /* Don't assume anything if /proc/PID/status can't be read. */
1594 if (linux_proc_pid_is_trace_stopped_nowarn (ptid_get_lwp (lp->ptid)) == 0)
1595 {
1596 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
1597 lp->status = 0;
1598 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1599 return 1;
1600 }
1601 return 0;
1602}
1603
1604/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
1605 disappears while we try to resume it. */
1606
1607static void
1608linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1609{
1610 TRY
1611 {
1612 linux_resume_one_lwp_throw (lp, step, signo);
1613 }
1614 CATCH (ex, RETURN_MASK_ERROR)
1615 {
1616 if (!check_ptrace_stopped_lwp_gone (lp))
1617 throw_exception (ex);
1618 }
1619 END_CATCH
1620}
1621
d6b0e80f
AC
1622/* Resume LP. */
1623
25289eb2 1624static void
e5ef252a 1625resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
d6b0e80f 1626{
25289eb2 1627 if (lp->stopped)
6c95b8df 1628 {
c9657e70 1629 struct inferior *inf = find_inferior_ptid (lp->ptid);
25289eb2
PA
1630
1631 if (inf->vfork_child != NULL)
1632 {
1633 if (debug_linux_nat)
1634 fprintf_unfiltered (gdb_stdlog,
1635 "RC: Not resuming %s (vfork parent)\n",
1636 target_pid_to_str (lp->ptid));
1637 }
8a99810d 1638 else if (!lwp_status_pending_p (lp))
25289eb2
PA
1639 {
1640 if (debug_linux_nat)
1641 fprintf_unfiltered (gdb_stdlog,
e5ef252a
PA
1642 "RC: Resuming sibling %s, %s, %s\n",
1643 target_pid_to_str (lp->ptid),
1644 (signo != GDB_SIGNAL_0
1645 ? strsignal (gdb_signal_to_host (signo))
1646 : "0"),
1647 step ? "step" : "resume");
25289eb2 1648
8a99810d 1649 linux_resume_one_lwp (lp, step, signo);
25289eb2
PA
1650 }
1651 else
1652 {
1653 if (debug_linux_nat)
1654 fprintf_unfiltered (gdb_stdlog,
1655 "RC: Not resuming sibling %s (has pending)\n",
1656 target_pid_to_str (lp->ptid));
1657 }
6c95b8df 1658 }
25289eb2 1659 else
d6b0e80f 1660 {
d90e17a7
PA
1661 if (debug_linux_nat)
1662 fprintf_unfiltered (gdb_stdlog,
25289eb2 1663 "RC: Not resuming sibling %s (not stopped)\n",
d6b0e80f 1664 target_pid_to_str (lp->ptid));
d6b0e80f 1665 }
25289eb2 1666}
d6b0e80f 1667
8817a6f2
PA
1668/* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1669 Resume LWP with the last stop signal, if it is in pass state. */
e5ef252a 1670
25289eb2 1671static int
8817a6f2 1672linux_nat_resume_callback (struct lwp_info *lp, void *except)
25289eb2 1673{
e5ef252a
PA
1674 enum gdb_signal signo = GDB_SIGNAL_0;
1675
8817a6f2
PA
1676 if (lp == except)
1677 return 0;
1678
e5ef252a
PA
1679 if (lp->stopped)
1680 {
1681 struct thread_info *thread;
1682
1683 thread = find_thread_ptid (lp->ptid);
1684 if (thread != NULL)
1685 {
70509625 1686 signo = thread->suspend.stop_signal;
e5ef252a
PA
1687 thread->suspend.stop_signal = GDB_SIGNAL_0;
1688 }
1689 }
1690
1691 resume_lwp (lp, 0, signo);
d6b0e80f
AC
1692 return 0;
1693}
1694
1695static int
1696resume_clear_callback (struct lwp_info *lp, void *data)
1697{
1698 lp->resumed = 0;
25289eb2 1699 lp->last_resume_kind = resume_stop;
d6b0e80f
AC
1700 return 0;
1701}
1702
1703static int
1704resume_set_callback (struct lwp_info *lp, void *data)
1705{
1706 lp->resumed = 1;
25289eb2 1707 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1708 return 0;
1709}
1710
f6ac5f3d
PA
1711void
1712linux_nat_target::resume (ptid_t ptid, int step, enum gdb_signal signo)
d6b0e80f
AC
1713{
1714 struct lwp_info *lp;
d90e17a7 1715 int resume_many;
d6b0e80f 1716
76f50ad1
DJ
1717 if (debug_linux_nat)
1718 fprintf_unfiltered (gdb_stdlog,
1719 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1720 step ? "step" : "resume",
1721 target_pid_to_str (ptid),
a493e3e2 1722 (signo != GDB_SIGNAL_0
2ea28649 1723 ? strsignal (gdb_signal_to_host (signo)) : "0"),
76f50ad1
DJ
1724 target_pid_to_str (inferior_ptid));
1725
d6b0e80f 1726 /* A specific PTID means `step only this process id'. */
d90e17a7
PA
1727 resume_many = (ptid_equal (minus_one_ptid, ptid)
1728 || ptid_is_pid (ptid));
4c28f408 1729
e3e9f5a2
PA
1730 /* Mark the lwps we're resuming as resumed. */
1731 iterate_over_lwps (ptid, resume_set_callback, NULL);
d6b0e80f 1732
d90e17a7
PA
1733 /* See if it's the current inferior that should be handled
1734 specially. */
1735 if (resume_many)
1736 lp = find_lwp_pid (inferior_ptid);
1737 else
1738 lp = find_lwp_pid (ptid);
9f0bdab8 1739 gdb_assert (lp != NULL);
d6b0e80f 1740
9f0bdab8 1741 /* Remember if we're stepping. */
25289eb2 1742 lp->last_resume_kind = step ? resume_step : resume_continue;
d6b0e80f 1743
9f0bdab8
DJ
1744 /* If we have a pending wait status for this thread, there is no
1745 point in resuming the process. But first make sure that
1746 linux_nat_wait won't preemptively handle the event - we
1747 should never take this short-circuit if we are going to
1748 leave LP running, since we have skipped resuming all the
1749 other threads. This bit of code needs to be synchronized
1750 with linux_nat_wait. */
76f50ad1 1751
9f0bdab8
DJ
1752 if (lp->status && WIFSTOPPED (lp->status))
1753 {
2455069d
UW
1754 if (!lp->step
1755 && WSTOPSIG (lp->status)
1756 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
d6b0e80f 1757 {
9f0bdab8
DJ
1758 if (debug_linux_nat)
1759 fprintf_unfiltered (gdb_stdlog,
1760 "LLR: Not short circuiting for ignored "
1761 "status 0x%x\n", lp->status);
1762
d6b0e80f
AC
1763 /* FIXME: What should we do if we are supposed to continue
1764 this thread with a signal? */
a493e3e2 1765 gdb_assert (signo == GDB_SIGNAL_0);
2ea28649 1766 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
1767 lp->status = 0;
1768 }
1769 }
76f50ad1 1770
8a99810d 1771 if (lwp_status_pending_p (lp))
9f0bdab8
DJ
1772 {
1773 /* FIXME: What should we do if we are supposed to continue
1774 this thread with a signal? */
a493e3e2 1775 gdb_assert (signo == GDB_SIGNAL_0);
76f50ad1 1776
9f0bdab8
DJ
1777 if (debug_linux_nat)
1778 fprintf_unfiltered (gdb_stdlog,
1779 "LLR: Short circuiting for status 0x%x\n",
1780 lp->status);
d6b0e80f 1781
7feb7d06
PA
1782 if (target_can_async_p ())
1783 {
6a3753b3 1784 target_async (1);
7feb7d06
PA
1785 /* Tell the event loop we have something to process. */
1786 async_file_mark ();
1787 }
9f0bdab8 1788 return;
d6b0e80f
AC
1789 }
1790
d90e17a7 1791 if (resume_many)
8817a6f2 1792 iterate_over_lwps (ptid, linux_nat_resume_callback, lp);
d90e17a7 1793
d6b0e80f
AC
1794 if (debug_linux_nat)
1795 fprintf_unfiltered (gdb_stdlog,
1796 "LLR: %s %s, %s (resume event thread)\n",
1797 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2bf6fb9d 1798 target_pid_to_str (lp->ptid),
a493e3e2 1799 (signo != GDB_SIGNAL_0
2ea28649 1800 ? strsignal (gdb_signal_to_host (signo)) : "0"));
b84876c2 1801
2bf6fb9d
PA
1802 linux_resume_one_lwp (lp, step, signo);
1803
b84876c2 1804 if (target_can_async_p ())
6a3753b3 1805 target_async (1);
d6b0e80f
AC
1806}
1807
c5f62d5f 1808/* Send a signal to an LWP. */
d6b0e80f
AC
1809
1810static int
1811kill_lwp (int lwpid, int signo)
1812{
4a6ed09b 1813 int ret;
d6b0e80f 1814
4a6ed09b
PA
1815 errno = 0;
1816 ret = syscall (__NR_tkill, lwpid, signo);
1817 if (errno == ENOSYS)
1818 {
1819 /* If tkill fails, then we are not using nptl threads, a
1820 configuration we no longer support. */
1821 perror_with_name (("tkill"));
1822 }
1823 return ret;
d6b0e80f
AC
1824}
1825
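For contrast with the tkill call above: kill () addresses a whole thread group, while tkill ()/tgkill () address a single thread. A minimal sketch of the tgkill variant (a hypothetical helper, not part of this file) would be:

#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* Sketch only: send SIGNO to thread TID inside thread group TGID.
   tgkill additionally checks the thread group id, which guards
   against the tid having been recycled by an unrelated process.  */

static int
tgkill_lwp (pid_t tgid, pid_t tid, int signo)
{
  return syscall (SYS_tgkill, tgid, tid, signo);
}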
ca2163eb
PA
1826/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1827 event, check if the core is interested in it: if not, ignore the
1828 event, and keep waiting; otherwise, we need to toggle the LWP's
1829 syscall entry/exit status, since the ptrace event itself doesn't
1830 indicate it, and report the trap to higher layers. */
1831
1832static int
1833linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1834{
1835 struct target_waitstatus *ourstatus = &lp->waitstatus;
1836 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
1837 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
1838
1839 if (stopping)
1840 {
1841 /* If we're stopping threads, there's a SIGSTOP pending, which
1842 makes it so that the LWP reports an immediate syscall return,
1843 followed by the SIGSTOP. Skip seeing that "return" using
1844 PTRACE_CONT directly, and let stop_wait_callback collect the
1845 SIGSTOP. Later, when the thread is resumed, a new syscall
1846 entry event will be reported. If we didn't do this (and returned 0), we'd
1847 leave a syscall entry pending, and our caller, by using
1848 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1849 itself. Later, when the user re-resumes this LWP, we'd see
1850 another syscall entry event and we'd mistake it for a return.
1851
1852 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1853 (leaving immediately with LWP->signalled set, without issuing
1854 a PTRACE_CONT), it would still be problematic to leave this
1855 syscall enter pending, as later when the thread is resumed,
1856 it would then see the same syscall exit mentioned above,
1857 followed by the delayed SIGSTOP, while the syscall didn't
1858 actually get to execute. It seems it would be even more
1859 confusing to the user. */
1860
1861 if (debug_linux_nat)
1862 fprintf_unfiltered (gdb_stdlog,
1863 "LHST: ignoring syscall %d "
1864 "for LWP %ld (stopping threads), "
1865 "resuming with PTRACE_CONT for SIGSTOP\n",
1866 syscall_number,
dfd4cc63 1867 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1868
1869 lp->syscall_state = TARGET_WAITKIND_IGNORE;
dfd4cc63 1870 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
8817a6f2 1871 lp->stopped = 0;
ca2163eb
PA
1872 return 1;
1873 }
1874
bfd09d20
JS
1875 /* Always update the entry/return state, even if this particular
1876 syscall isn't interesting to the core now. In async mode,
1877 the user could install a new catchpoint for this syscall
1878 between syscall enter/return, and we'll need to know to
1879 report a syscall return if that happens. */
1880 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1881 ? TARGET_WAITKIND_SYSCALL_RETURN
1882 : TARGET_WAITKIND_SYSCALL_ENTRY);
1883
ca2163eb
PA
1884 if (catch_syscall_enabled ())
1885 {
ca2163eb
PA
1886 if (catching_syscall_number (syscall_number))
1887 {
1888 /* Alright, an event to report. */
1889 ourstatus->kind = lp->syscall_state;
1890 ourstatus->value.syscall_number = syscall_number;
1891
1892 if (debug_linux_nat)
1893 fprintf_unfiltered (gdb_stdlog,
1894 "LHST: stopping for %s of syscall %d"
1895 " for LWP %ld\n",
3e43a32a
MS
1896 lp->syscall_state
1897 == TARGET_WAITKIND_SYSCALL_ENTRY
ca2163eb
PA
1898 ? "entry" : "return",
1899 syscall_number,
dfd4cc63 1900 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1901 return 0;
1902 }
1903
1904 if (debug_linux_nat)
1905 fprintf_unfiltered (gdb_stdlog,
1906 "LHST: ignoring %s of syscall %d "
1907 "for LWP %ld\n",
1908 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1909 ? "entry" : "return",
1910 syscall_number,
dfd4cc63 1911 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1912 }
1913 else
1914 {
1915 /* If we had been syscall tracing, and hence used PT_SYSCALL
1916 before on this LWP, it could happen that the user removes all
1917 syscall catchpoints before we get to process this event.
1918 There are two noteworthy issues here:
1919
1920 - When stopped at a syscall entry event, resuming with
1921 PT_STEP still resumes executing the syscall and reports a
1922 syscall return.
1923
1924 - Only PT_SYSCALL catches syscall enters. If we last
1925 single-stepped this thread, then this event can't be a
1926 syscall enter; having just single-stepped, this
1927 has to be a syscall exit.
1928
1929 The points above mean that the next resume, be it PT_STEP or
1930 PT_CONTINUE, can not trigger a syscall trace event. */
1931 if (debug_linux_nat)
1932 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
1933 "LHST: caught syscall event "
1934 "with no syscall catchpoints."
ca2163eb
PA
1935 " %d for LWP %ld, ignoring\n",
1936 syscall_number,
dfd4cc63 1937 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1938 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1939 }
1940
1941 /* The core isn't interested in this event. For efficiency, avoid
1942 stopping all threads only to have the core resume them all again.
1943 Since we're not stopping threads, if we're still syscall tracing
1944 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1945 subsequent syscall. Simply resume using the inf-ptrace layer,
1946 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1947
8a99810d 1948 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
ca2163eb
PA
1949 return 1;
1950}
1951
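To see the entry/return toggling in isolation, here is a stand-alone tracer sketch (illustrative only, not GDB code; it assumes a Linux host and a "true" binary in PATH). Every PTRACE_SYSCALL stop looks identical to waitpid, so the tracer flips its own entry/exit flag, just as lp->syscall_state is toggled above:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  pid_t child = fork ();

  if (child == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);
      execlp ("true", "true", (char *) NULL);
      _exit (127);
    }

  int status;
  int in_syscall = 0;		/* Toggled on every syscall stop.  */

  waitpid (child, &status, 0);	/* First stop, reported at exec.  */
  ptrace (PTRACE_SETOPTIONS, child, 0, (void *) (long) PTRACE_O_TRACESYSGOOD);

  while (1)
    {
      ptrace (PTRACE_SYSCALL, child, 0, 0);
      if (waitpid (child, &status, 0) < 0 || WIFEXITED (status))
	break;
      if (WIFSTOPPED (status) && WSTOPSIG (status) == (SIGTRAP | 0x80))
	{
	  in_syscall = !in_syscall;
	  printf ("syscall %s\n", in_syscall ? "entry" : "return");
	}
    }
  return 0;
}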
3d799a95
DJ
1952/* Handle a GNU/Linux extended wait response. If we see a clone
1953 event, we need to add the new LWP to our list (and not report the
1954 trap to higher layers). This function returns non-zero if the
1955 event should be ignored and we should wait again. If STOPPING is
1956 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1957
1958static int
4dd63d48 1959linux_handle_extended_wait (struct lwp_info *lp, int status)
d6b0e80f 1960{
dfd4cc63 1961 int pid = ptid_get_lwp (lp->ptid);
3d799a95 1962 struct target_waitstatus *ourstatus = &lp->waitstatus;
89a5711c 1963 int event = linux_ptrace_get_extended_event (status);
d6b0e80f 1964
bfd09d20
JS
1965 /* All extended events we currently use are mid-syscall. Only
1966 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
1967 you have to be using PTRACE_SEIZE to get that. */
1968 lp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
1969
3d799a95
DJ
1970 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1971 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1972 {
3d799a95
DJ
1973 unsigned long new_pid;
1974 int ret;
1975
1976 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1977
3d799a95
DJ
1978 /* If we haven't already seen the new PID stop, wait for it now. */
1979 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1980 {
1981 /* The new child has a pending SIGSTOP. We can't affect it until it
1982 hits the SIGSTOP, but we're already attached. */
4a6ed09b 1983 ret = my_waitpid (new_pid, &status, __WALL);
3d799a95
DJ
1984 if (ret == -1)
1985 perror_with_name (_("waiting for new child"));
1986 else if (ret != new_pid)
1987 internal_error (__FILE__, __LINE__,
1988 _("wait returned unexpected PID %d"), ret);
1989 else if (!WIFSTOPPED (status))
1990 internal_error (__FILE__, __LINE__,
1991 _("wait returned unexpected status 0x%x"), status);
1992 }
1993
3a3e9ee3 1994 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95 1995
26cb8b7c
PA
1996 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1997 {
1998 /* The arch-specific native code may need to know about new
1999 forks even if those end up never mapped to an
2000 inferior. */
2001 if (linux_nat_new_fork != NULL)
2002 linux_nat_new_fork (lp, new_pid);
2003 }
2004
2277426b 2005 if (event == PTRACE_EVENT_FORK
dfd4cc63 2006 && linux_fork_checkpointing_p (ptid_get_pid (lp->ptid)))
2277426b 2007 {
2277426b
PA
2008 /* Handle checkpointing by linux-fork.c here as a special
2009 case. We don't want the follow-fork-mode or 'catch fork'
2010 to interfere with this. */
2011
2012 /* This won't actually modify the breakpoint list, but will
2013 physically remove the breakpoints from the child. */
d80ee84f 2014 detach_breakpoints (ptid_build (new_pid, new_pid, 0));
2277426b
PA
2015
2016 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
2017 if (!find_fork_pid (new_pid))
2018 add_fork (new_pid);
2277426b
PA
2019
2020 /* Report as spurious, so that infrun doesn't want to follow
2021 this fork. We're actually doing an infcall in
2022 linux-fork.c. */
2023 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2277426b
PA
2024
2025 /* Report the stop to the core. */
2026 return 0;
2027 }
2028
3d799a95
DJ
2029 if (event == PTRACE_EVENT_FORK)
2030 ourstatus->kind = TARGET_WAITKIND_FORKED;
2031 else if (event == PTRACE_EVENT_VFORK)
2032 ourstatus->kind = TARGET_WAITKIND_VFORKED;
4dd63d48 2033 else if (event == PTRACE_EVENT_CLONE)
3d799a95 2034 {
78768c4a
JK
2035 struct lwp_info *new_lp;
2036
3d799a95 2037 ourstatus->kind = TARGET_WAITKIND_IGNORE;
78768c4a 2038
3c4d7e12
PA
2039 if (debug_linux_nat)
2040 fprintf_unfiltered (gdb_stdlog,
2041 "LHEW: Got clone event "
2042 "from LWP %d, new child is LWP %ld\n",
2043 pid, new_pid);
2044
dfd4cc63 2045 new_lp = add_lwp (ptid_build (ptid_get_pid (lp->ptid), new_pid, 0));
4c28f408 2046 new_lp->stopped = 1;
4dd63d48 2047 new_lp->resumed = 1;
d6b0e80f 2048
2db9a427
PA
2049 /* If the thread_db layer is active, let it record the user
2050 level thread id and status, and add the thread to GDB's
2051 list. */
2052 if (!thread_db_notice_clone (lp->ptid, new_lp->ptid))
3d799a95 2053 {
2db9a427
PA
2054 /* The process is not using thread_db. Add the LWP to
2055 GDB's list. */
2056 target_post_attach (ptid_get_lwp (new_lp->ptid));
2057 add_thread (new_lp->ptid);
2058 }
4c28f408 2059
2ee52aa4 2060 /* Even if we're stopping the thread for some reason
4dd63d48
PA
2061 internal to this module, from the perspective of infrun
2062 and the user/frontend, this new thread is running until
2063 it next reports a stop. */
2ee52aa4 2064 set_running (new_lp->ptid, 1);
4dd63d48 2065 set_executing (new_lp->ptid, 1);
4c28f408 2066
4dd63d48 2067 if (WSTOPSIG (status) != SIGSTOP)
79395f92 2068 {
4dd63d48
PA
2069 /* This can happen if someone starts sending signals with
2070 a lower number than SIGSTOP (e.g. SIGUSR1) to the new
2071 thread before it gets a chance to run.
2072 This is an unlikely case, and harder to handle for
2073 fork / vfork than for clone, so we do not try - but
2074 we handle it for clone events here. */
2075
2076 new_lp->signalled = 1;
2077
79395f92
PA
2078 /* We created NEW_LP so it cannot yet contain STATUS. */
2079 gdb_assert (new_lp->status == 0);
2080
2081 /* Save the wait status to report later. */
2082 if (debug_linux_nat)
2083 fprintf_unfiltered (gdb_stdlog,
2084 "LHEW: waitpid of new LWP %ld, "
2085 "saving status %s\n",
dfd4cc63 2086 (long) ptid_get_lwp (new_lp->ptid),
79395f92
PA
2087 status_to_str (status));
2088 new_lp->status = status;
2089 }
aa01bd36
PA
2090 else if (report_thread_events)
2091 {
2092 new_lp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
2093 new_lp->status = status;
2094 }
79395f92 2095
3d799a95
DJ
2096 return 1;
2097 }
2098
2099 return 0;
d6b0e80f
AC
2100 }
2101
3d799a95
DJ
2102 if (event == PTRACE_EVENT_EXEC)
2103 {
a75724bc
PA
2104 if (debug_linux_nat)
2105 fprintf_unfiltered (gdb_stdlog,
2106 "LHEW: Got exec event from LWP %ld\n",
dfd4cc63 2107 ptid_get_lwp (lp->ptid));
a75724bc 2108
3d799a95
DJ
2109 ourstatus->kind = TARGET_WAITKIND_EXECD;
2110 ourstatus->value.execd_pathname
f6ac5f3d 2111 = xstrdup (linux_proc_pid_to_exec_file (pid));
3d799a95 2112
8af756ef
PA
2113 /* The thread that execed must have been resumed, but, when a
2114 thread execs, it changes its tid to the tgid, and the old
2115 tgid thread might not have been resumed. */
2116 lp->resumed = 1;
6c95b8df
PA
2117 return 0;
2118 }
2119
2120 if (event == PTRACE_EVENT_VFORK_DONE)
2121 {
2122 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 2123 {
6c95b8df 2124 if (debug_linux_nat)
3e43a32a
MS
2125 fprintf_unfiltered (gdb_stdlog,
2126 "LHEW: Got expected PTRACE_EVENT_"
2127 "VFORK_DONE from LWP %ld: stopping\n",
dfd4cc63 2128 ptid_get_lwp (lp->ptid));
3d799a95 2129
6c95b8df
PA
2130 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2131 return 0;
3d799a95
DJ
2132 }
2133
6c95b8df 2134 if (debug_linux_nat)
3e43a32a
MS
2135 fprintf_unfiltered (gdb_stdlog,
2136 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
20ba1ce6 2137 "from LWP %ld: ignoring\n",
dfd4cc63 2138 ptid_get_lwp (lp->ptid));
6c95b8df 2139 return 1;
3d799a95
DJ
2140 }
2141
2142 internal_error (__FILE__, __LINE__,
2143 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2144}
2145
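For reference, the raw encoding that linux_ptrace_get_extended_event and PTRACE_GETEVENTMSG deal with can be shown with a small hypothetical decoder; this is a sketch assuming a tracer that enabled PTRACE_O_TRACEFORK/VFORK/CLONE, not GDB's code:

#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Sketch only: if STATUS is a fork/vfork/clone extended stop of PID,
   return the new LWP's id, else 0.  The event code sits above the
   signal bits (status >> 16); the new id is fetched separately with
   PTRACE_GETEVENTMSG.  */

static long
extended_event_new_lwp (pid_t pid, int status)
{
  unsigned long new_lwp = 0;
  int event = (status >> 16) & 0xff;

  if (WIFSTOPPED (status)
      && WSTOPSIG (status) == SIGTRAP
      && (event == PTRACE_EVENT_FORK
	  || event == PTRACE_EVENT_VFORK
	  || event == PTRACE_EVENT_CLONE))
    ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_lwp);

  return (long) new_lwp;
}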
9c3a5d93
PA
2146/* Suspend waiting for a signal. We're mostly interested in
2147 SIGCHLD/SIGINT. */
2148
2149static void
2150wait_for_signal ()
2151{
2152 if (debug_linux_nat)
2153 fprintf_unfiltered (gdb_stdlog, "linux-nat: about to sigsuspend\n");
2154 sigsuspend (&suspend_mask);
2155
2156 /* If the quit flag is set, it means that the user pressed Ctrl-C
2157 and we're debugging a process that is running on a separate
2158 terminal, so we must forward the Ctrl-C to the inferior. (If the
2159 inferior is sharing GDB's terminal, then the Ctrl-C reaches the
2160 inferior directly.) We must do this here because functions that
2161 need to block waiting for a signal loop forever until there's an
2162 event to report before returning back to the event loop. */
2163 if (!target_terminal::is_ours ())
2164 {
2165 if (check_quit_flag ())
2166 target_pass_ctrlc ();
2167 }
2168}
2169
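This is race-free because SIGCHLD stays blocked (block_child_signals) while pending events are inspected, and sigsuspend atomically installs a mask with SIGCHLD unblocked before sleeping. A minimal sketch of that mask setup, using a hypothetical handler and not GDB's actual block_child_signals/suspend_mask code:

#include <signal.h>
#include <stddef.h>
#include <string.h>

static sigset_t blocked_mask;	/* Mask with SIGCHLD blocked.  */
static sigset_t suspend_mask;	/* Mask for sigsuspend, SIGCHLD open.  */

static void
setup_sigchld_wait (void (*handler) (int))
{
  struct sigaction sa;

  memset (&sa, 0, sizeof (sa));
  sa.sa_handler = handler;
  sigemptyset (&sa.sa_mask);
  sigaction (SIGCHLD, &sa, NULL);

  /* Keep SIGCHLD blocked while pending events are examined...  */
  sigemptyset (&blocked_mask);
  sigaddset (&blocked_mask, SIGCHLD);
  sigprocmask (SIG_BLOCK, &blocked_mask, NULL);

  /* ... and let sigsuspend (&suspend_mask) atomically unblock it
     while sleeping, so a SIGCHLD arriving between the check and the
     sleep is delivered the moment the suspend mask is installed.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);
}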
d6b0e80f
AC
2170/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2171 exited. */
2172
2173static int
2174wait_lwp (struct lwp_info *lp)
2175{
2176 pid_t pid;
432b4d03 2177 int status = 0;
d6b0e80f 2178 int thread_dead = 0;
432b4d03 2179 sigset_t prev_mask;
d6b0e80f
AC
2180
2181 gdb_assert (!lp->stopped);
2182 gdb_assert (lp->status == 0);
2183
432b4d03
JK
2184 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2185 block_child_signals (&prev_mask);
2186
2187 for (;;)
d6b0e80f 2188 {
4a6ed09b 2189 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, __WALL | WNOHANG);
a9f4bb21
PA
2190 if (pid == -1 && errno == ECHILD)
2191 {
2192 /* The thread has previously exited. We need to delete it
4a6ed09b
PA
2193 now because if this was a non-leader thread execing, we
2194 won't get an exit event. See comments on exec events at
2195 the top of the file. */
a9f4bb21
PA
2196 thread_dead = 1;
2197 if (debug_linux_nat)
2198 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2199 target_pid_to_str (lp->ptid));
2200 }
432b4d03
JK
2201 if (pid != 0)
2202 break;
2203
2204 /* Bugs 10970, 12702.
2205 Thread group leader may have exited in which case we'll lock up in
2206 waitpid if there are other threads, even if they are all zombies too.
2207 Basically, we're not supposed to use waitpid this way.
4a6ed09b
PA
2208 tkill(pid,0) cannot be used here as it gets ESRCH for both
2209 zombie and running processes.
432b4d03
JK
2210
2211 As a workaround, check if we're waiting for the thread group leader and
2212 if it's a zombie, and avoid calling waitpid if it is.
2213
2214 This is racy: what if the thread group leader becomes a zombie right after we check?
2215 Therefore always use WNOHANG with sigsuspend - it is equivalent to
5f572dec 2216 a blocking waitpid, but linux_proc_pid_is_zombie is safe this way. */
432b4d03 2217
dfd4cc63
LM
2218 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid)
2219 && linux_proc_pid_is_zombie (ptid_get_lwp (lp->ptid)))
d6b0e80f 2220 {
d6b0e80f
AC
2221 thread_dead = 1;
2222 if (debug_linux_nat)
432b4d03
JK
2223 fprintf_unfiltered (gdb_stdlog,
2224 "WL: Thread group leader %s vanished.\n",
d6b0e80f 2225 target_pid_to_str (lp->ptid));
432b4d03 2226 break;
d6b0e80f 2227 }
432b4d03
JK
2228
2229 /* Wait for the next SIGCHLD and try again. This may let SIGCHLD handlers
2230 get invoked even though our caller had them intentionally blocked by
2231 block_child_signals. Only the loop in linux_nat_wait_1 is sensitive to
2232 this, and there, if we get called, my_waitpid gets called again before
2233 control reaches sigsuspend, so we can safely let the handlers
2234 get executed here. */
9c3a5d93 2235 wait_for_signal ();
432b4d03
JK
2236 }
2237
2238 restore_child_signals_mask (&prev_mask);
2239
d6b0e80f
AC
2240 if (!thread_dead)
2241 {
dfd4cc63 2242 gdb_assert (pid == ptid_get_lwp (lp->ptid));
d6b0e80f
AC
2243
2244 if (debug_linux_nat)
2245 {
2246 fprintf_unfiltered (gdb_stdlog,
2247 "WL: waitpid %s received %s\n",
2248 target_pid_to_str (lp->ptid),
2249 status_to_str (status));
2250 }
d6b0e80f 2251
a9f4bb21
PA
2252 /* Check if the thread has exited. */
2253 if (WIFEXITED (status) || WIFSIGNALED (status))
2254 {
aa01bd36
PA
2255 if (report_thread_events
2256 || ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
69dde7dc
PA
2257 {
2258 if (debug_linux_nat)
aa01bd36 2259 fprintf_unfiltered (gdb_stdlog, "WL: LWP %d exited.\n",
69dde7dc
PA
2260 ptid_get_pid (lp->ptid));
2261
aa01bd36 2262 /* If this is the leader exiting, it means the whole
69dde7dc
PA
2263 process is gone. Store the status to report to the
2264 core. Store it in lp->waitstatus, because lp->status
2265 would be ambiguous (W_EXITCODE(0,0) == 0). */
2266 store_waitstatus (&lp->waitstatus, status);
2267 return 0;
2268 }
2269
a9f4bb21
PA
2270 thread_dead = 1;
2271 if (debug_linux_nat)
2272 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2273 target_pid_to_str (lp->ptid));
2274 }
d6b0e80f
AC
2275 }
2276
2277 if (thread_dead)
2278 {
e26af52f 2279 exit_lwp (lp);
d6b0e80f
AC
2280 return 0;
2281 }
2282
2283 gdb_assert (WIFSTOPPED (status));
8817a6f2 2284 lp->stopped = 1;
d6b0e80f 2285
8784d563
PA
2286 if (lp->must_set_ptrace_flags)
2287 {
2288 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
de0d863e 2289 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 2290
de0d863e 2291 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), options);
8784d563
PA
2292 lp->must_set_ptrace_flags = 0;
2293 }
2294
ca2163eb
PA
2295 /* Handle GNU/Linux's syscall SIGTRAPs. */
2296 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2297 {
2298 /* No longer need the sysgood bit. The ptrace event ends up
2299 recorded in lp->waitstatus if we care for it. We can carry
2300 on handling the event like a regular SIGTRAP from here
2301 on. */
2302 status = W_STOPCODE (SIGTRAP);
2303 if (linux_handle_syscall_trap (lp, 1))
2304 return wait_lwp (lp);
2305 }
bfd09d20
JS
2306 else
2307 {
2308 /* Almost all other ptrace-stops are known to be outside of system
2309 calls, with further exceptions in linux_handle_extended_wait. */
2310 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2311 }
ca2163eb 2312
d6b0e80f 2313 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2314 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2315 && linux_is_extended_waitstatus (status))
d6b0e80f
AC
2316 {
2317 if (debug_linux_nat)
2318 fprintf_unfiltered (gdb_stdlog,
2319 "WL: Handling extended status 0x%06x\n",
2320 status);
4dd63d48 2321 linux_handle_extended_wait (lp, status);
20ba1ce6 2322 return 0;
d6b0e80f
AC
2323 }
2324
2325 return status;
2326}
2327
2328/* Send a SIGSTOP to LP. */
2329
2330static int
2331stop_callback (struct lwp_info *lp, void *data)
2332{
2333 if (!lp->stopped && !lp->signalled)
2334 {
2335 int ret;
2336
2337 if (debug_linux_nat)
2338 {
2339 fprintf_unfiltered (gdb_stdlog,
2340 "SC: kill %s **<SIGSTOP>**\n",
2341 target_pid_to_str (lp->ptid));
2342 }
2343 errno = 0;
dfd4cc63 2344 ret = kill_lwp (ptid_get_lwp (lp->ptid), SIGSTOP);
d6b0e80f
AC
2345 if (debug_linux_nat)
2346 {
2347 fprintf_unfiltered (gdb_stdlog,
2348 "SC: lwp kill %d %s\n",
2349 ret,
2350 errno ? safe_strerror (errno) : "ERRNO-OK");
2351 }
2352
2353 lp->signalled = 1;
2354 gdb_assert (lp->status == 0);
2355 }
2356
2357 return 0;
2358}
2359
7b50312a
PA
2360/* Request a stop on LWP. */
2361
2362void
2363linux_stop_lwp (struct lwp_info *lwp)
2364{
2365 stop_callback (lwp, NULL);
2366}
2367
2db9a427
PA
2368/* See linux-nat.h */
2369
2370void
2371linux_stop_and_wait_all_lwps (void)
2372{
2373 /* Stop all LWP's ... */
2374 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
2375
2376 /* ... and wait until all of them have reported back that
2377 they're no longer running. */
2378 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
2379}
2380
2381/* See linux-nat.h */
2382
2383void
2384linux_unstop_all_lwps (void)
2385{
2386 iterate_over_lwps (minus_one_ptid,
2387 resume_stopped_resumed_lwps, &minus_one_ptid);
2388}
2389
57380f4e 2390/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2391
2392static int
57380f4e
DJ
2393linux_nat_has_pending_sigint (int pid)
2394{
2395 sigset_t pending, blocked, ignored;
57380f4e
DJ
2396
2397 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2398
2399 if (sigismember (&pending, SIGINT)
2400 && !sigismember (&ignored, SIGINT))
2401 return 1;
2402
2403 return 0;
2404}
2405
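linux_proc_pending_signals obtains these sets from /proc/PID/status, where they appear as hexadecimal masks (SigPnd, SigBlk, SigIgn, plus ShdPnd for the shared queue). A rough, hypothetical illustration of the decoding, not GDB's implementation:

#include <stdio.h>
#include <string.h>

/* Sketch only: return non-zero if signal SIGNO is in the FIELD
   ("SigPnd", "SigBlk", "SigIgn", "ShdPnd") mask of /proc/PID/status.
   Bit N-1 of the mask corresponds to signal N.  */

static int
proc_signal_in_set (long pid, const char *field, int signo)
{
  char path[64], line[256];
  unsigned long long mask = 0;
  size_t len = strlen (field);
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%ld/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, field, len) == 0 && line[len] == ':')
      {
	sscanf (line + len + 1, "%llx", &mask);
	break;
      }

  fclose (f);
  return (mask & (1ULL << (signo - 1))) != 0;
}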
2406/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2407
2408static int
2409set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2410{
57380f4e
DJ
2411 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2412 flag to consume the next one. */
2413 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2414 && WSTOPSIG (lp->status) == SIGINT)
2415 lp->status = 0;
2416 else
2417 lp->ignore_sigint = 1;
2418
2419 return 0;
2420}
2421
2422/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2423 This function is called after we know the LWP has stopped; if the LWP
2424 stopped before the expected SIGINT was delivered, then it will never have
2425 arrived. Also, if the signal was delivered to a shared queue and consumed
2426 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2427
57380f4e
DJ
2428static void
2429maybe_clear_ignore_sigint (struct lwp_info *lp)
2430{
2431 if (!lp->ignore_sigint)
2432 return;
2433
dfd4cc63 2434 if (!linux_nat_has_pending_sigint (ptid_get_lwp (lp->ptid)))
57380f4e
DJ
2435 {
2436 if (debug_linux_nat)
2437 fprintf_unfiltered (gdb_stdlog,
2438 "MCIS: Clearing bogus flag for %s\n",
2439 target_pid_to_str (lp->ptid));
2440 lp->ignore_sigint = 0;
2441 }
2442}
2443
ebec9a0f
PA
2444/* Fetch the possible triggered data watchpoint info and store it in
2445 LP.
2446
2447 On some archs, like x86, that use debug registers to set
2448 watchpoints, it's possible that the way to know which watched
2449 address trapped, is to check the register that is used to select
2450 which address to watch. Problem is, between setting the watchpoint
2451 and reading back which data address trapped, the user may change
2452 the set of watchpoints, and, as a consequence, GDB changes the
2453 debug registers in the inferior. To avoid reading back a stale
2454 stopped-data-address when that happens, we cache in LP the fact
2455 that a watchpoint trapped, and the corresponding data address, as
2456 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2457 registers meanwhile, we have the cached data we can rely on. */
2458
9c02b525
PA
2459static int
2460check_stopped_by_watchpoint (struct lwp_info *lp)
ebec9a0f 2461{
2989a365 2462 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
ebec9a0f
PA
2463 inferior_ptid = lp->ptid;
2464
f6ac5f3d 2465 if (linux_target->low_stopped_by_watchpoint ())
ebec9a0f 2466 {
15c66dd6 2467 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
f6ac5f3d
PA
2468 lp->stopped_data_address_p
2469 = linux_target->low_stopped_data_address (&lp->stopped_data_address);
ebec9a0f
PA
2470 }
2471
15c66dd6 2472 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
9c02b525
PA
2473}
2474
9c02b525 2475/* Returns true if the LWP had stopped for a watchpoint. */
ebec9a0f 2476
f6ac5f3d
PA
2477int
2478linux_nat_target::stopped_by_watchpoint ()
ebec9a0f
PA
2479{
2480 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2481
2482 gdb_assert (lp != NULL);
2483
15c66dd6 2484 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
ebec9a0f
PA
2485}
2486
f6ac5f3d
PA
2487int
2488linux_nat_target::stopped_data_address (CORE_ADDR *addr_p)
ebec9a0f
PA
2489{
2490 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2491
2492 gdb_assert (lp != NULL);
2493
2494 *addr_p = lp->stopped_data_address;
2495
2496 return lp->stopped_data_address_p;
2497}
2498
26ab7092
JK
2499/* Commonly, any breakpoint / watchpoint generates only SIGTRAP. */
2500
2501static int
2502sigtrap_is_event (int status)
2503{
2504 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2505}
2506
26ab7092
JK
2507/* Set an alternative recognizer for SIGTRAP-like events. If
2508 breakpoint_inserted_here_p reports a breakpoint at the stop address,
2509 then gdbarch_decr_pc_after_break will be applied. */
2510
2511void
2512linux_nat_set_status_is_event (struct target_ops *t,
2513 int (*status_is_event) (int status))
2514{
2515 linux_nat_status_is_event = status_is_event;
2516}
2517
57380f4e
DJ
2518/* Wait until LP is stopped. */
2519
2520static int
2521stop_wait_callback (struct lwp_info *lp, void *data)
2522{
c9657e70 2523 struct inferior *inf = find_inferior_ptid (lp->ptid);
6c95b8df
PA
2524
2525 /* If this is a vfork parent, bail out, it is not going to report
2526 any SIGSTOP until the vfork is done with. */
2527 if (inf->vfork_child != NULL)
2528 return 0;
2529
d6b0e80f
AC
2530 if (!lp->stopped)
2531 {
2532 int status;
2533
2534 status = wait_lwp (lp);
2535 if (status == 0)
2536 return 0;
2537
57380f4e
DJ
2538 if (lp->ignore_sigint && WIFSTOPPED (status)
2539 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2540 {
57380f4e 2541 lp->ignore_sigint = 0;
d6b0e80f
AC
2542
2543 errno = 0;
dfd4cc63 2544 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
8817a6f2 2545 lp->stopped = 0;
d6b0e80f
AC
2546 if (debug_linux_nat)
2547 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2548 "PTRACE_CONT %s, 0, 0 (%s) "
2549 "(discarding SIGINT)\n",
d6b0e80f
AC
2550 target_pid_to_str (lp->ptid),
2551 errno ? safe_strerror (errno) : "OK");
2552
57380f4e 2553 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2554 }
2555
57380f4e
DJ
2556 maybe_clear_ignore_sigint (lp);
2557
d6b0e80f
AC
2558 if (WSTOPSIG (status) != SIGSTOP)
2559 {
e5ef252a 2560 /* The thread was stopped with a signal other than SIGSTOP. */
7feb7d06 2561
e5ef252a
PA
2562 if (debug_linux_nat)
2563 fprintf_unfiltered (gdb_stdlog,
2564 "SWC: Pending event %s in %s\n",
2565 status_to_str ((int) status),
2566 target_pid_to_str (lp->ptid));
2567
2568 /* Save the sigtrap event. */
2569 lp->status = status;
e5ef252a 2570 gdb_assert (lp->signalled);
e7ad2f14 2571 save_stop_reason (lp);
d6b0e80f
AC
2572 }
2573 else
2574 {
2575 /* We caught the SIGSTOP that we intended to catch, so
2576 there's no SIGSTOP pending. */
e5ef252a
PA
2577
2578 if (debug_linux_nat)
2579 fprintf_unfiltered (gdb_stdlog,
2bf6fb9d 2580 "SWC: Expected SIGSTOP caught for %s.\n",
e5ef252a
PA
2581 target_pid_to_str (lp->ptid));
2582
e5ef252a
PA
2583 /* Reset SIGNALLED only after the stop_wait_callback call
2584 above as it does gdb_assert on SIGNALLED. */
d6b0e80f
AC
2585 lp->signalled = 0;
2586 }
2587 }
2588
2589 return 0;
2590}
2591
9c02b525
PA
2592/* Return non-zero if LP has a wait status pending. Discard the
2593 pending event and resume the LWP if the event that originally
2594 caused the stop became uninteresting. */
d6b0e80f
AC
2595
2596static int
2597status_callback (struct lwp_info *lp, void *data)
2598{
2599 /* Only report a pending wait status if we pretend that this has
2600 indeed been resumed. */
ca2163eb
PA
2601 if (!lp->resumed)
2602 return 0;
2603
eb54c8bf
PA
2604 if (!lwp_status_pending_p (lp))
2605 return 0;
2606
15c66dd6
PA
2607 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2608 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525
PA
2609 {
2610 struct regcache *regcache = get_thread_regcache (lp->ptid);
9c02b525
PA
2611 CORE_ADDR pc;
2612 int discard = 0;
2613
9c02b525
PA
2614 pc = regcache_read_pc (regcache);
2615
2616 if (pc != lp->stop_pc)
2617 {
2618 if (debug_linux_nat)
2619 fprintf_unfiltered (gdb_stdlog,
2620 "SC: PC of %s changed. was=%s, now=%s\n",
2621 target_pid_to_str (lp->ptid),
2622 paddress (target_gdbarch (), lp->stop_pc),
2623 paddress (target_gdbarch (), pc));
2624 discard = 1;
2625 }
faf09f01
PA
2626
2627#if !USE_SIGTRAP_SIGINFO
a01bda52 2628 else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
9c02b525
PA
2629 {
2630 if (debug_linux_nat)
2631 fprintf_unfiltered (gdb_stdlog,
2632 "SC: previous breakpoint of %s, at %s gone\n",
2633 target_pid_to_str (lp->ptid),
2634 paddress (target_gdbarch (), lp->stop_pc));
2635
2636 discard = 1;
2637 }
faf09f01 2638#endif
9c02b525
PA
2639
2640 if (discard)
2641 {
2642 if (debug_linux_nat)
2643 fprintf_unfiltered (gdb_stdlog,
2644 "SC: pending event of %s cancelled.\n",
2645 target_pid_to_str (lp->ptid));
2646
2647 lp->status = 0;
2648 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2649 return 0;
2650 }
9c02b525
PA
2651 }
2652
eb54c8bf 2653 return 1;
d6b0e80f
AC
2654}
2655
d6b0e80f
AC
2656/* Count the LWP's that have had events. */
2657
2658static int
2659count_events_callback (struct lwp_info *lp, void *data)
2660{
9a3c8263 2661 int *count = (int *) data;
d6b0e80f
AC
2662
2663 gdb_assert (count != NULL);
2664
9c02b525
PA
2665 /* Select only resumed LWPs that have an event pending. */
2666 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2667 (*count)++;
2668
2669 return 0;
2670}
2671
2672/* Select the LWP (if any) that is currently being single-stepped. */
2673
2674static int
2675select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2676{
25289eb2
PA
2677 if (lp->last_resume_kind == resume_step
2678 && lp->status != 0)
d6b0e80f
AC
2679 return 1;
2680 else
2681 return 0;
2682}
2683
8a99810d
PA
2684/* Returns true if LP has a status pending. */
2685
2686static int
2687lwp_status_pending_p (struct lwp_info *lp)
2688{
2689 /* We check for lp->waitstatus in addition to lp->status, because we
2690 can have pending process exits recorded in lp->status and
2691 W_EXITCODE(0,0) happens to be 0. */
2692 return lp->status != 0 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE;
2693}
2694
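The ambiguity mentioned in the comment is easy to demonstrate: a normal exit with code 0 encodes to the integer 0, which is exactly the "no pending status" sentinel, hence the extra lp->waitstatus check. A tiny self-contained illustration (W_EXITCODE may need a fallback definition, as gdb_wait.h provides):

#include <assert.h>
#include <sys/wait.h>

#ifndef W_EXITCODE
# define W_EXITCODE(ret, sig) ((ret) << 8 | (sig))
#endif

static void
exit_status_zero_is_ambiguous (void)
{
  int status = W_EXITCODE (0, 0);	/* exit (0), no signal.  */

  assert (status == 0);			/* Same value as "no status".  */
  assert (WIFEXITED (status) && WEXITSTATUS (status) == 0);
}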
b90fc188 2695/* Select the Nth LWP that has had an event. */
d6b0e80f
AC
2696
2697static int
2698select_event_lwp_callback (struct lwp_info *lp, void *data)
2699{
9a3c8263 2700 int *selector = (int *) data;
d6b0e80f
AC
2701
2702 gdb_assert (selector != NULL);
2703
9c02b525
PA
2704 /* Select only resumed LWPs that have an event pending. */
2705 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2706 if ((*selector)-- == 0)
2707 return 1;
2708
2709 return 0;
2710}
2711
e7ad2f14
PA
2712/* Called when the LWP stopped for a signal/trap. If it stopped for a
2713 trap check what caused it (breakpoint, watchpoint, trace, etc.),
2714 and save the result in the LWP's stop_reason field. If it stopped
2715 for a breakpoint, decrement the PC if necessary on the lwp's
2716 architecture. */
9c02b525 2717
e7ad2f14
PA
2718static void
2719save_stop_reason (struct lwp_info *lp)
710151dd 2720{
e7ad2f14
PA
2721 struct regcache *regcache;
2722 struct gdbarch *gdbarch;
515630c5 2723 CORE_ADDR pc;
9c02b525 2724 CORE_ADDR sw_bp_pc;
faf09f01
PA
2725#if USE_SIGTRAP_SIGINFO
2726 siginfo_t siginfo;
2727#endif
9c02b525 2728
e7ad2f14
PA
2729 gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
2730 gdb_assert (lp->status != 0);
2731
2732 if (!linux_nat_status_is_event (lp->status))
2733 return;
2734
2735 regcache = get_thread_regcache (lp->ptid);
ac7936df 2736 gdbarch = regcache->arch ();
e7ad2f14 2737
9c02b525 2738 pc = regcache_read_pc (regcache);
527a273a 2739 sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);
515630c5 2740
faf09f01
PA
2741#if USE_SIGTRAP_SIGINFO
2742 if (linux_nat_get_siginfo (lp->ptid, &siginfo))
2743 {
2744 if (siginfo.si_signo == SIGTRAP)
2745 {
e7ad2f14
PA
2746 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
2747 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
faf09f01 2748 {
e7ad2f14
PA
2749 /* The si_code is ambiguous on this arch -- check debug
2750 registers. */
2751 if (!check_stopped_by_watchpoint (lp))
2752 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2753 }
2754 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
2755 {
2756 /* If we determine the LWP stopped for a SW breakpoint,
2757 trust it. Particularly don't check watchpoint
2758 registers, because at least on s390, we'd find
2759 stopped-by-watchpoint as long as there's a watchpoint
2760 set. */
faf09f01 2761 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
faf09f01 2762 }
e7ad2f14 2763 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
faf09f01 2764 {
e7ad2f14
PA
2765 /* This can indicate either a hardware breakpoint or
2766 hardware watchpoint. Check debug registers. */
2767 if (!check_stopped_by_watchpoint (lp))
2768 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
faf09f01 2769 }
2bf6fb9d
PA
2770 else if (siginfo.si_code == TRAP_TRACE)
2771 {
2772 if (debug_linux_nat)
2773 fprintf_unfiltered (gdb_stdlog,
2774 "CSBB: %s stopped by trace\n",
2775 target_pid_to_str (lp->ptid));
e7ad2f14
PA
2776
2777 /* We may have single stepped an instruction that
2778 triggered a watchpoint. In that case, on some
2779 architectures (such as x86), instead of TRAP_HWBKPT,
2780 si_code indicates TRAP_TRACE, and we need to check
2781 the debug registers separately. */
2782 check_stopped_by_watchpoint (lp);
2bf6fb9d 2783 }
faf09f01
PA
2784 }
2785 }
2786#else
9c02b525 2787 if ((!lp->step || lp->stop_pc == sw_bp_pc)
a01bda52 2788 && software_breakpoint_inserted_here_p (regcache->aspace (),
9c02b525 2789 sw_bp_pc))
710151dd 2790 {
9c02b525
PA
2791 /* The LWP was either continued, or stepped a software
2792 breakpoint instruction. */
e7ad2f14
PA
2793 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2794 }
2795
a01bda52 2796 if (hardware_breakpoint_inserted_here_p (regcache->aspace (), pc))
e7ad2f14
PA
2797 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
2798
2799 if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
2800 check_stopped_by_watchpoint (lp);
2801#endif
2802
2803 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2804 {
710151dd
PA
2805 if (debug_linux_nat)
2806 fprintf_unfiltered (gdb_stdlog,
2bf6fb9d 2807 "CSBB: %s stopped by software breakpoint\n",
710151dd
PA
2808 target_pid_to_str (lp->ptid));
2809
2810 /* Back up the PC if necessary. */
9c02b525
PA
2811 if (pc != sw_bp_pc)
2812 regcache_write_pc (regcache, sw_bp_pc);
515630c5 2813
e7ad2f14
PA
2814 /* Update this so we record the correct stop PC below. */
2815 pc = sw_bp_pc;
710151dd 2816 }
e7ad2f14 2817 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525
PA
2818 {
2819 if (debug_linux_nat)
2820 fprintf_unfiltered (gdb_stdlog,
e7ad2f14
PA
2821 "CSBB: %s stopped by hardware breakpoint\n",
2822 target_pid_to_str (lp->ptid));
2823 }
2824 else if (lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
2825 {
2826 if (debug_linux_nat)
2827 fprintf_unfiltered (gdb_stdlog,
2828 "CSBB: %s stopped by hardware watchpoint\n",
9c02b525 2829 target_pid_to_str (lp->ptid));
9c02b525 2830 }
d6b0e80f 2831
e7ad2f14 2832 lp->stop_pc = pc;
d6b0e80f
AC
2833}
2834
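The si_code values consulted above are the standard SIGTRAP sub-codes from <signal.h>; the GDB_ARCH_IS_TRAP_* macros exist because some architectures report ambiguous or overlapping codes. A plain decoder, for illustration only:

#define _GNU_SOURCE 1	/* For the TRAP_* si_code constants.  */
#include <signal.h>

static const char *
sigtrap_code_name (int si_code)
{
  switch (si_code)
    {
    case TRAP_BRKPT:
      return "software breakpoint";
    case TRAP_TRACE:
      return "single-step trace";
#ifdef TRAP_HWBKPT
    case TRAP_HWBKPT:
      return "hardware breakpoint/watchpoint";
#endif
    default:
      return "other";
    }
}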
faf09f01
PA
2835
2836/* Returns true if the LWP had stopped for a software breakpoint. */
2837
f6ac5f3d
PA
2838int
2839linux_nat_target::stopped_by_sw_breakpoint ()
faf09f01
PA
2840{
2841 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2842
2843 gdb_assert (lp != NULL);
2844
2845 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2846}
2847
2848/* Implement the supports_stopped_by_sw_breakpoint method. */
2849
f6ac5f3d
PA
2850int
2851linux_nat_target::supports_stopped_by_sw_breakpoint ()
faf09f01
PA
2852{
2853 return USE_SIGTRAP_SIGINFO;
2854}
2855
2856/* Returns true if the LWP had stopped for a hardware
2857 breakpoint/watchpoint. */
2858
f6ac5f3d
PA
2859int
2860linux_nat_target::stopped_by_hw_breakpoint ()
faf09f01
PA
2861{
2862 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2863
2864 gdb_assert (lp != NULL);
2865
2866 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2867}
2868
2869/* Implement the supports_stopped_by_hw_breakpoint method. */
2870
f6ac5f3d
PA
2871int
2872linux_nat_target::supports_stopped_by_hw_breakpoint ()
faf09f01
PA
2873{
2874 return USE_SIGTRAP_SIGINFO;
2875}
2876
d6b0e80f
AC
2877/* Select one LWP out of those that have events pending. */
2878
2879static void
d90e17a7 2880select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2881{
2882 int num_events = 0;
2883 int random_selector;
9c02b525 2884 struct lwp_info *event_lp = NULL;
d6b0e80f 2885
ac264b3b 2886 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2887 (*orig_lp)->status = *status;
2888
9c02b525
PA
2889 /* In all-stop, give preference to the LWP that is being
2890 single-stepped. There will be at most one, and it will be the
2891 LWP that the core is most interested in. If we didn't do this,
2892 then we'd have to handle pending step SIGTRAPs somehow in case
2893 the core later continues the previously-stepped thread, as
2894 otherwise we'd report the pending SIGTRAP then, and the core, not
2895 having stepped the thread, wouldn't understand what the trap was
2896 for, and therefore would report it to the user as a random
2897 signal. */
fbea99ea 2898 if (!target_is_non_stop_p ())
d6b0e80f 2899 {
9c02b525
PA
2900 event_lp = iterate_over_lwps (filter,
2901 select_singlestep_lwp_callback, NULL);
2902 if (event_lp != NULL)
2903 {
2904 if (debug_linux_nat)
2905 fprintf_unfiltered (gdb_stdlog,
2906 "SEL: Select single-step %s\n",
2907 target_pid_to_str (event_lp->ptid));
2908 }
d6b0e80f 2909 }
9c02b525
PA
2910
2911 if (event_lp == NULL)
d6b0e80f 2912 {
9c02b525 2913 /* Pick one at random, out of those which have had events. */
d6b0e80f 2914
9c02b525 2915 /* First see how many events we have. */
d90e17a7 2916 iterate_over_lwps (filter, count_events_callback, &num_events);
8bf3b159 2917 gdb_assert (num_events > 0);
d6b0e80f 2918
9c02b525
PA
2919 /* Now randomly pick a LWP out of those that have had
2920 events. */
d6b0e80f
AC
2921 random_selector = (int)
2922 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2923
2924 if (debug_linux_nat && num_events > 1)
2925 fprintf_unfiltered (gdb_stdlog,
9c02b525 2926 "SEL: Found %d events, selecting #%d\n",
d6b0e80f
AC
2927 num_events, random_selector);
2928
d90e17a7
PA
2929 event_lp = iterate_over_lwps (filter,
2930 select_event_lwp_callback,
d6b0e80f
AC
2931 &random_selector);
2932 }
2933
2934 if (event_lp != NULL)
2935 {
2936 /* Switch the event LWP. */
2937 *orig_lp = event_lp;
2938 *status = event_lp->status;
2939 }
2940
2941 /* Flush the wait status for the event LWP. */
2942 (*orig_lp)->status = 0;
2943}
2944
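The formula above maps rand () onto a uniform index: rand () / (RAND_MAX + 1.0) lies in [0, 1), so scaling by num_events and truncating yields an integer in [0, num_events). As a stand-alone sketch of just that step:

#include <stdlib.h>

/* Illustration of select_event_lwp's selection formula: return a
   pseudo-random index in [0, n).  */

static int
pick_random_index (int n)
{
  return (int) ((n * (double) rand ()) / (RAND_MAX + 1.0));
}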
2945/* Return non-zero if LP has been resumed. */
2946
2947static int
2948resumed_callback (struct lwp_info *lp, void *data)
2949{
2950 return lp->resumed;
2951}
2952
02f3fc28 2953/* Check if we should go on and pass this event to common code.
9c02b525 2954 Return the affected lwp if we are, or NULL otherwise. */
12d9289a 2955
02f3fc28 2956static struct lwp_info *
9c02b525 2957linux_nat_filter_event (int lwpid, int status)
02f3fc28
PA
2958{
2959 struct lwp_info *lp;
89a5711c 2960 int event = linux_ptrace_get_extended_event (status);
02f3fc28
PA
2961
2962 lp = find_lwp_pid (pid_to_ptid (lwpid));
2963
2964 /* Check for stop events reported by a process we didn't already
2965 know about - anything not already in our LWP list.
2966
2967 If we're expecting to receive stopped processes after
2968 fork, vfork, and clone events, then we'll just add the
2969 new one to our list and go back to waiting for the event
2970 to be reported - the stopped process might be returned
0e5bf2a8
PA
2971 from waitpid before or after the event is.
2972
2973 But note the case of a non-leader thread exec'ing after the
2974 leader having exited, and gone from our lists. The non-leader
2975 thread changes its tid to the tgid. */
2976
2977 if (WIFSTOPPED (status) && lp == NULL
89a5711c 2978 && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
0e5bf2a8
PA
2979 {
2980 /* A multi-thread exec after we had seen the leader exiting. */
2981 if (debug_linux_nat)
2982 fprintf_unfiltered (gdb_stdlog,
2983 "LLW: Re-adding thread group leader LWP %d.\n",
2984 lwpid);
2985
dfd4cc63 2986 lp = add_lwp (ptid_build (lwpid, lwpid, 0));
0e5bf2a8
PA
2987 lp->stopped = 1;
2988 lp->resumed = 1;
2989 add_thread (lp->ptid);
2990 }
2991
02f3fc28
PA
2992 if (WIFSTOPPED (status) && !lp)
2993 {
3b27ef47
PA
2994 if (debug_linux_nat)
2995 fprintf_unfiltered (gdb_stdlog,
2996 "LHEW: saving LWP %ld status %s in stopped_pids list\n",
2997 (long) lwpid, status_to_str (status));
84636d28 2998 add_to_pid_list (&stopped_pids, lwpid, status);
02f3fc28
PA
2999 return NULL;
3000 }
3001
3002 /* Make sure we don't report an event for the exit of an LWP not in
1777feb0 3003 our list, i.e. not part of the current process. This can happen
fd62cb89 3004 if we detach from a program we originally forked and then it
02f3fc28
PA
3005 exits. */
3006 if (!WIFSTOPPED (status) && !lp)
3007 return NULL;
3008
8817a6f2
PA
3009 /* This LWP is stopped now. (And if dead, this prevents it from
3010 ever being continued.) */
3011 lp->stopped = 1;
3012
8784d563
PA
3013 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
3014 {
3015 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
de0d863e 3016 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 3017
de0d863e 3018 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), options);
8784d563
PA
3019 lp->must_set_ptrace_flags = 0;
3020 }
3021
ca2163eb
PA
3022 /* Handle GNU/Linux's syscall SIGTRAPs. */
3023 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3024 {
3025 /* No longer need the sysgood bit. The ptrace event ends up
3026 recorded in lp->waitstatus if we care for it. We can carry
3027 on handling the event like a regular SIGTRAP from here
3028 on. */
3029 status = W_STOPCODE (SIGTRAP);
3030 if (linux_handle_syscall_trap (lp, 0))
3031 return NULL;
3032 }
bfd09d20
JS
3033 else
3034 {
3035 /* Almost all other ptrace-stops are known to be outside of system
3036 calls, with further exceptions in linux_handle_extended_wait. */
3037 lp->syscall_state = TARGET_WAITKIND_IGNORE;
3038 }
02f3fc28 3039
ca2163eb 3040 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
3041 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
3042 && linux_is_extended_waitstatus (status))
02f3fc28
PA
3043 {
3044 if (debug_linux_nat)
3045 fprintf_unfiltered (gdb_stdlog,
3046 "LLW: Handling extended status 0x%06x\n",
3047 status);
4dd63d48 3048 if (linux_handle_extended_wait (lp, status))
02f3fc28
PA
3049 return NULL;
3050 }
3051
3052 /* Check if the thread has exited. */
9c02b525
PA
3053 if (WIFEXITED (status) || WIFSIGNALED (status))
3054 {
aa01bd36
PA
3055 if (!report_thread_events
3056 && num_lwps (ptid_get_pid (lp->ptid)) > 1)
02f3fc28 3057 {
9c02b525
PA
3058 if (debug_linux_nat)
3059 fprintf_unfiltered (gdb_stdlog,
3060 "LLW: %s exited.\n",
3061 target_pid_to_str (lp->ptid));
3062
4a6ed09b
PA
3063 /* If there is at least one more LWP, then the exit signal
3064 was not the end of the debugged application and should be
3065 ignored. */
3066 exit_lwp (lp);
3067 return NULL;
02f3fc28
PA
3068 }
3069
77598427
PA
3070 /* Note that even if the leader was ptrace-stopped, it can still
3071 exit, if e.g., some other thread brings down the whole
3072 process (calls `exit'). So don't assert that the lwp is
3073 resumed. */
02f3fc28
PA
3074 if (debug_linux_nat)
3075 fprintf_unfiltered (gdb_stdlog,
aa01bd36 3076 "LWP %ld exited (resumed=%d)\n",
77598427 3077 ptid_get_lwp (lp->ptid), lp->resumed);
02f3fc28 3078
9c02b525
PA
3079 /* Dead LWPs aren't expected to report a pending SIGSTOP. */
3080 lp->signalled = 0;
3081
3082 /* Store the pending event in the waitstatus, because
3083 W_EXITCODE(0,0) == 0. */
3084 store_waitstatus (&lp->waitstatus, status);
3085 return lp;
02f3fc28
PA
3086 }
3087
02f3fc28
PA
3088 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3089 an attempt to stop an LWP. */
3090 if (lp->signalled
3091 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3092 {
02f3fc28
PA
3093 lp->signalled = 0;
3094
2bf6fb9d 3095 if (lp->last_resume_kind == resume_stop)
25289eb2 3096 {
2bf6fb9d
PA
3097 if (debug_linux_nat)
3098 fprintf_unfiltered (gdb_stdlog,
3099 "LLW: resume_stop SIGSTOP caught for %s.\n",
3100 target_pid_to_str (lp->ptid));
3101 }
3102 else
3103 {
3104 /* This is a delayed SIGSTOP. Filter out the event. */
02f3fc28 3105
25289eb2
PA
3106 if (debug_linux_nat)
3107 fprintf_unfiltered (gdb_stdlog,
2bf6fb9d 3108 "LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
25289eb2
PA
3109 lp->step ?
3110 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3111 target_pid_to_str (lp->ptid));
02f3fc28 3112
2bf6fb9d 3113 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
25289eb2 3114 gdb_assert (lp->resumed);
25289eb2
PA
3115 return NULL;
3116 }
02f3fc28
PA
3117 }
3118
57380f4e
DJ
3119 /* Make sure we don't report a SIGINT that we have already displayed
3120 for another thread. */
3121 if (lp->ignore_sigint
3122 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3123 {
3124 if (debug_linux_nat)
3125 fprintf_unfiltered (gdb_stdlog,
3126 "LLW: Delayed SIGINT caught for %s.\n",
3127 target_pid_to_str (lp->ptid));
3128
3129 /* This is a delayed SIGINT. */
3130 lp->ignore_sigint = 0;
3131
8a99810d 3132 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
57380f4e
DJ
3133 if (debug_linux_nat)
3134 fprintf_unfiltered (gdb_stdlog,
3135 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3136 lp->step ?
3137 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3138 target_pid_to_str (lp->ptid));
57380f4e
DJ
3139 gdb_assert (lp->resumed);
3140
3141 /* Discard the event. */
3142 return NULL;
3143 }
3144
9c02b525
PA
3145 /* Don't report signals that GDB isn't interested in, such as
3146 signals that are neither printed nor stopped upon. Stopping all
3147 threads can be a bit time-consuming so if we want decent
3148 performance with heavily multi-threaded programs, especially when
3149 they're using a high frequency timer, we'd better avoid it if we
3150 can. */
3151 if (WIFSTOPPED (status))
3152 {
3153 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3154
fbea99ea 3155 if (!target_is_non_stop_p ())
9c02b525
PA
3156 {
3157 /* Only do the below in all-stop, as we currently use SIGSTOP
3158 to implement target_stop (see linux_nat_stop) in
3159 non-stop. */
3160 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3161 {
3162 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3163 forwarded to the entire process group, that is, all LWPs
3164 will receive it - unless they're using CLONE_THREAD to
3165 share signals. Since we only want to report it once, we
3166 mark it as ignored for all LWPs except this one. */
3167 iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
3168 set_ignore_sigint, NULL);
3169 lp->ignore_sigint = 0;
3170 }
3171 else
3172 maybe_clear_ignore_sigint (lp);
3173 }
3174
3175 /* When using hardware single-step, we need to report every signal.
c9587f88
AT
3176 Otherwise, signals in pass_mask may be short-circuited,
3177 except for signals that might have been caused by a breakpoint. */
9c02b525 3178 if (!lp->step
c9587f88
AT
3179 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
3180 && !linux_wstatus_maybe_breakpoint (status))
9c02b525
PA
3181 {
3182 linux_resume_one_lwp (lp, lp->step, signo);
3183 if (debug_linux_nat)
3184 fprintf_unfiltered (gdb_stdlog,
3185 "LLW: %s %s, %s (preempt 'handle')\n",
3186 lp->step ?
3187 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3188 target_pid_to_str (lp->ptid),
3189 (signo != GDB_SIGNAL_0
3190 ? strsignal (gdb_signal_to_host (signo))
3191 : "0"));
3192 return NULL;
3193 }
3194 }
3195
02f3fc28
PA
3196 /* An interesting event. */
3197 gdb_assert (lp);
ca2163eb 3198 lp->status = status;
e7ad2f14 3199 save_stop_reason (lp);
02f3fc28
PA
3200 return lp;
3201}
3202
0e5bf2a8
PA
3203/* Detect zombie thread group leaders, and "exit" them. We can't reap
3204 their exits until all other threads in the group have exited. */
3205
3206static void
3207check_zombie_leaders (void)
3208{
3209 struct inferior *inf;
3210
3211 ALL_INFERIORS (inf)
3212 {
3213 struct lwp_info *leader_lp;
3214
3215 if (inf->pid == 0)
3216 continue;
3217
3218 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3219 if (leader_lp != NULL
3220 /* Check if there are other threads in the group, as we may
3221 have raced with the inferior simply exiting. */
3222 && num_lwps (inf->pid) > 1
5f572dec 3223 && linux_proc_pid_is_zombie (inf->pid))
0e5bf2a8
PA
3224 {
3225 if (debug_linux_nat)
3226 fprintf_unfiltered (gdb_stdlog,
3227 "CZL: Thread group leader %d zombie "
3228 "(it exited, or another thread execd).\n",
3229 inf->pid);
3230
3231 /* A leader zombie can mean one of two things:
3232
3233 - It exited, and there's a pending exit status
3234 available, or only the leader exited (not the whole
3235 program). In the latter case, we can't waitpid the
3236 leader's exit status until all other threads are gone.
3237
3238 - There are 3 or more threads in the group, and a thread
4a6ed09b
PA
3239 other than the leader exec'd. See comments on exec
3240 events at the top of the file. We could try
0e5bf2a8
PA
3241 distinguishing the exit and exec cases, by waiting once
3242 more, and seeing if something comes out, but it doesn't
3243 sound useful. The previous leader _does_ go away, and
3244 we'll re-add the new one once we see the exec event
3245 (which is just the same as what would happen if the
3246 previous leader did exit voluntarily before some other
3247 thread execs). */
3248
3249 if (debug_linux_nat)
3250 fprintf_unfiltered (gdb_stdlog,
3251 "CZL: Thread group leader %d vanished.\n",
3252 inf->pid);
3253 exit_lwp (leader_lp);
3254 }
3255 }
3256}
3257
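GDB answers the "are there other threads in the group?" question from its own LWP list (num_lwps), but the same information is visible in the kernel's /proc/PID/task directory. As a hypothetical illustration only, not what this file does:

#include <dirent.h>
#include <stdio.h>

/* Sketch only: count the live threads of PID by listing
   /proc/PID/task, skipping "." and "..".  */

static int
proc_count_threads (long pid)
{
  char path[64];
  struct dirent *ent;
  DIR *dir;
  int count = 0;

  snprintf (path, sizeof (path), "/proc/%ld/task", pid);
  dir = opendir (path);
  if (dir == NULL)
    return 0;

  while ((ent = readdir (dir)) != NULL)
    if (ent->d_name[0] != '.')
      count++;

  closedir (dir);
  return count;
}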
aa01bd36
PA
3258/* Convenience function that is called when the kernel reports an exit
3259 event. This decides whether to report the event to GDB as a
3260 process exit event, a thread exit event, or to suppress the
3261 event. */
3262
3263static ptid_t
3264filter_exit_event (struct lwp_info *event_child,
3265 struct target_waitstatus *ourstatus)
3266{
3267 ptid_t ptid = event_child->ptid;
3268
3269 if (num_lwps (ptid_get_pid (ptid)) > 1)
3270 {
3271 if (report_thread_events)
3272 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3273 else
3274 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3275
3276 exit_lwp (event_child);
3277 }
3278
3279 return ptid;
3280}
3281
d6b0e80f 3282static ptid_t
f6ac5f3d 3283linux_nat_wait_1 (ptid_t ptid, struct target_waitstatus *ourstatus,
47608cb1 3284 int target_options)
d6b0e80f 3285{
fc9b8e47 3286 sigset_t prev_mask;
4b60df3d 3287 enum resume_kind last_resume_kind;
12d9289a 3288 struct lwp_info *lp;
12d9289a 3289 int status;
d6b0e80f 3290
01124a23 3291 if (debug_linux_nat)
b84876c2
PA
3292 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3293
f973ed9c
DJ
3294 /* The first time we get here after starting a new inferior, we may
3295 not have added it to the LWP list yet - this is the earliest
3296 moment at which we know its PID. */
d90e17a7 3297 if (ptid_is_pid (inferior_ptid))
f973ed9c 3298 {
27c9d204
PA
3299 /* Upgrade the main thread's ptid. */
3300 thread_change_ptid (inferior_ptid,
dfd4cc63
LM
3301 ptid_build (ptid_get_pid (inferior_ptid),
3302 ptid_get_pid (inferior_ptid), 0));
27c9d204 3303
26cb8b7c 3304 lp = add_initial_lwp (inferior_ptid);
f973ed9c
DJ
3305 lp->resumed = 1;
3306 }
3307
12696c10 3308 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
7feb7d06 3309 block_child_signals (&prev_mask);
d6b0e80f 3310
d6b0e80f 3311 /* First check if there is a LWP with a wait status pending. */
8a99810d
PA
3312 lp = iterate_over_lwps (ptid, status_callback, NULL);
3313 if (lp != NULL)
d6b0e80f
AC
3314 {
3315 if (debug_linux_nat)
d6b0e80f
AC
3316 fprintf_unfiltered (gdb_stdlog,
3317 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3318 status_to_str (lp->status),
d6b0e80f 3319 target_pid_to_str (lp->ptid));
d6b0e80f
AC
3320 }
3321
9c02b525
PA
3322 /* But if we don't find a pending event, we'll have to wait. Always
3323 pull all events out of the kernel. We'll randomly select an
3324 event LWP out of all that have events, to prevent starvation. */
7feb7d06 3325
d90e17a7 3326 while (lp == NULL)
d6b0e80f
AC
3327 {
3328 pid_t lwpid;
3329
0e5bf2a8
PA
3330 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3331 quirks:
3332
3333 - If the thread group leader exits while other threads in the
3334 thread group still exist, waitpid(TGID, ...) hangs. That
3335 waitpid won't return an exit status until the other threads
3336	    in the group are reaped.
3337
3338 - When a non-leader thread execs, that thread just vanishes
3339 without reporting an exit (so we'd hang if we waited for it
3340 explicitly in that case). The exec event is reported to
3341 the TGID pid. */
3342
3343 errno = 0;
4a6ed09b 3344 lwpid = my_waitpid (-1, &status, __WALL | WNOHANG);
0e5bf2a8
PA
3345
3346 if (debug_linux_nat)
3347 fprintf_unfiltered (gdb_stdlog,
3348 "LNW: waitpid(-1, ...) returned %d, %s\n",
3349 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
b84876c2 3350
d6b0e80f
AC
3351 if (lwpid > 0)
3352 {
d6b0e80f
AC
3353 if (debug_linux_nat)
3354 {
3355 fprintf_unfiltered (gdb_stdlog,
3356 "LLW: waitpid %ld received %s\n",
3357 (long) lwpid, status_to_str (status));
3358 }
3359
9c02b525 3360 linux_nat_filter_event (lwpid, status);
0e5bf2a8
PA
3361 /* Retry until nothing comes out of waitpid. A single
3362 SIGCHLD can indicate more than one child stopped. */
3363 continue;
d6b0e80f
AC
3364 }
3365
20ba1ce6
PA
3366 /* Now that we've pulled all events out of the kernel, resume
3367 LWPs that don't have an interesting event to report. */
3368 iterate_over_lwps (minus_one_ptid,
3369 resume_stopped_resumed_lwps, &minus_one_ptid);
3370
3371 /* ... and find an LWP with a status to report to the core, if
3372 any. */
9c02b525
PA
3373 lp = iterate_over_lwps (ptid, status_callback, NULL);
3374 if (lp != NULL)
3375 break;
3376
0e5bf2a8
PA
3377 /* Check for zombie thread group leaders. Those can't be reaped
3378 until all other threads in the thread group are. */
3379 check_zombie_leaders ();
d6b0e80f 3380
0e5bf2a8
PA
3381 /* If there are no resumed children left, bail. We'd be stuck
3382 forever in the sigsuspend call below otherwise. */
3383 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3384 {
3385 if (debug_linux_nat)
3386 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
b84876c2 3387
0e5bf2a8 3388 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
b84876c2 3389
0e5bf2a8
PA
3390 restore_child_signals_mask (&prev_mask);
3391 return minus_one_ptid;
d6b0e80f 3392 }
28736962 3393
0e5bf2a8
PA
3394 /* No interesting event to report to the core. */
3395
3396 if (target_options & TARGET_WNOHANG)
3397 {
01124a23 3398 if (debug_linux_nat)
28736962
PA
3399 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3400
0e5bf2a8 3401 ourstatus->kind = TARGET_WAITKIND_IGNORE;
28736962
PA
3402 restore_child_signals_mask (&prev_mask);
3403 return minus_one_ptid;
3404 }
d6b0e80f
AC
3405
3406 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3407 gdb_assert (lp == NULL);
0e5bf2a8
PA
3408
3409 /* Block until we get an event reported with SIGCHLD. */
9c3a5d93 3410 wait_for_signal ();
d6b0e80f
AC
3411 }
3412
d6b0e80f
AC
3413 gdb_assert (lp);
3414
ca2163eb
PA
3415 status = lp->status;
3416 lp->status = 0;
3417
fbea99ea 3418 if (!target_is_non_stop_p ())
4c28f408
PA
3419 {
3420	 /* Now stop all other LWPs ... */
d90e17a7 3421 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
4c28f408
PA
3422
3423 /* ... and wait until all of them have reported back that
3424 they're no longer running. */
d90e17a7 3425 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
9c02b525
PA
3426 }
3427
3428 /* If we're not waiting for a specific LWP, choose an event LWP from
3429 among those that have had events. Giving equal priority to all
3430 LWPs that have had events helps prevent starvation. */
3431 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3432 select_event_lwp (ptid, &lp, &status);
3433
3434 gdb_assert (lp != NULL);
3435
3436 /* Now that we've selected our final event LWP, un-adjust its PC if
faf09f01
PA
3437 it was a software breakpoint, and we can't reliably support the
3438 "stopped by software breakpoint" stop reason. */
3439 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3440 && !USE_SIGTRAP_SIGINFO)
9c02b525
PA
3441 {
3442 struct regcache *regcache = get_thread_regcache (lp->ptid);
ac7936df 3443 struct gdbarch *gdbarch = regcache->arch ();
527a273a 3444 int decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4c28f408 3445
9c02b525
PA
3446 if (decr_pc != 0)
3447 {
3448 CORE_ADDR pc;
d6b0e80f 3449
9c02b525
PA
3450 pc = regcache_read_pc (regcache);
3451 regcache_write_pc (regcache, pc + decr_pc);
3452 }
3453 }
e3e9f5a2 3454
9c02b525
PA
3455 /* We'll need this to determine whether to report a SIGSTOP as
3456 GDB_SIGNAL_0. Need to take a copy because resume_clear_callback
3457 clears it. */
3458 last_resume_kind = lp->last_resume_kind;
4b60df3d 3459
fbea99ea 3460 if (!target_is_non_stop_p ())
9c02b525 3461 {
e3e9f5a2
PA
3462 /* In all-stop, from the core's perspective, all LWPs are now
3463 stopped until a new resume action is sent over. */
3464 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3465 }
3466 else
25289eb2 3467 {
4b60df3d 3468 resume_clear_callback (lp, NULL);
25289eb2 3469 }
d6b0e80f 3470
26ab7092 3471 if (linux_nat_status_is_event (status))
d6b0e80f 3472 {
d6b0e80f
AC
3473 if (debug_linux_nat)
3474 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3475 "LLW: trap ptid is %s.\n",
3476 target_pid_to_str (lp->ptid));
d6b0e80f 3477 }
d6b0e80f
AC
3478
3479 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3480 {
3481 *ourstatus = lp->waitstatus;
3482 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3483 }
3484 else
3485 store_waitstatus (ourstatus, status);
3486
01124a23 3487 if (debug_linux_nat)
b84876c2
PA
3488 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3489
7feb7d06 3490 restore_child_signals_mask (&prev_mask);
1e225492 3491
4b60df3d 3492 if (last_resume_kind == resume_stop
25289eb2
PA
3493 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3494 && WSTOPSIG (status) == SIGSTOP)
3495 {
3496	 /* A thread that GDB requested to stop with target_stop has
3497	    stopped cleanly, so report it as SIG0.  The
3498 use of SIGSTOP is an implementation detail. */
a493e3e2 3499 ourstatus->value.sig = GDB_SIGNAL_0;
25289eb2
PA
3500 }
3501
1e225492
JK
3502 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3503 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3504 lp->core = -1;
3505 else
2e794194 3506 lp->core = linux_common_core_of_thread (lp->ptid);
1e225492 3507
aa01bd36
PA
3508 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3509 return filter_exit_event (lp, ourstatus);
3510
f973ed9c 3511 return lp->ptid;
d6b0e80f
AC
3512}
3513
e3e9f5a2
PA
3514/* Resume LWPs that are currently stopped without any pending status
3515 to report, but are resumed from the core's perspective. */
3516
3517static int
3518resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3519{
9a3c8263 3520 ptid_t *wait_ptid_p = (ptid_t *) data;
e3e9f5a2 3521
4dd63d48
PA
3522 if (!lp->stopped)
3523 {
3524 if (debug_linux_nat)
3525 fprintf_unfiltered (gdb_stdlog,
3526 "RSRL: NOT resuming LWP %s, not stopped\n",
3527 target_pid_to_str (lp->ptid));
3528 }
3529 else if (!lp->resumed)
3530 {
3531 if (debug_linux_nat)
3532 fprintf_unfiltered (gdb_stdlog,
3533 "RSRL: NOT resuming LWP %s, not resumed\n",
3534 target_pid_to_str (lp->ptid));
3535 }
3536 else if (lwp_status_pending_p (lp))
3537 {
3538 if (debug_linux_nat)
3539 fprintf_unfiltered (gdb_stdlog,
3540 "RSRL: NOT resuming LWP %s, has pending status\n",
3541 target_pid_to_str (lp->ptid));
3542 }
3543 else
e3e9f5a2 3544 {
336060f3 3545 struct regcache *regcache = get_thread_regcache (lp->ptid);
ac7936df 3546 struct gdbarch *gdbarch = regcache->arch ();
336060f3 3547
23f238d3 3548 TRY
e3e9f5a2 3549 {
23f238d3
PA
3550 CORE_ADDR pc = regcache_read_pc (regcache);
3551 int leave_stopped = 0;
e3e9f5a2 3552
23f238d3
PA
3553 /* Don't bother if there's a breakpoint at PC that we'd hit
3554 immediately, and we're not waiting for this LWP. */
3555 if (!ptid_match (lp->ptid, *wait_ptid_p))
3556 {
a01bda52 3557 if (breakpoint_inserted_here_p (regcache->aspace (), pc))
23f238d3
PA
3558 leave_stopped = 1;
3559 }
e3e9f5a2 3560
23f238d3
PA
3561 if (!leave_stopped)
3562 {
3563 if (debug_linux_nat)
3564 fprintf_unfiltered (gdb_stdlog,
3565 "RSRL: resuming stopped-resumed LWP %s at "
3566 "%s: step=%d\n",
3567 target_pid_to_str (lp->ptid),
3568 paddress (gdbarch, pc),
3569 lp->step);
3570
3571 linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
3572 }
3573 }
3574 CATCH (ex, RETURN_MASK_ERROR)
3575 {
3576 if (!check_ptrace_stopped_lwp_gone (lp))
3577 throw_exception (ex);
3578 }
3579 END_CATCH
e3e9f5a2
PA
3580 }
3581
3582 return 0;
3583}
3584
f6ac5f3d
PA
3585ptid_t
3586linux_nat_target::wait (ptid_t ptid, struct target_waitstatus *ourstatus,
3587 int target_options)
7feb7d06
PA
3588{
3589 ptid_t event_ptid;
3590
3591 if (debug_linux_nat)
09826ec5
PA
3592 {
3593 char *options_string;
3594
3595 options_string = target_options_to_string (target_options);
3596 fprintf_unfiltered (gdb_stdlog,
3597 "linux_nat_wait: [%s], [%s]\n",
3598 target_pid_to_str (ptid),
3599 options_string);
3600 xfree (options_string);
3601 }
7feb7d06
PA
3602
3603 /* Flush the async file first. */
d9d41e78 3604 if (target_is_async_p ())
7feb7d06
PA
3605 async_file_flush ();
3606
e3e9f5a2
PA
3607 /* Resume LWPs that are currently stopped without any pending status
3608 to report, but are resumed from the core's perspective. LWPs get
3609 in this state if we find them stopping at a time we're not
3610 interested in reporting the event (target_wait on a
3611 specific_process, for example, see linux_nat_wait_1), and
3612 meanwhile the event became uninteresting. Don't bother resuming
3613 LWPs we're not going to wait for if they'd stop immediately. */
fbea99ea 3614 if (target_is_non_stop_p ())
e3e9f5a2
PA
3615 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3616
f6ac5f3d 3617 event_ptid = linux_nat_wait_1 (ptid, ourstatus, target_options);
7feb7d06
PA
3618
3619 /* If we requested any event, and something came out, assume there
3620 may be more. If we requested a specific lwp or process, also
3621 assume there may be more. */
d9d41e78 3622 if (target_is_async_p ()
6953d224
PA
3623 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3624 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
7feb7d06
PA
3625 || !ptid_equal (ptid, minus_one_ptid)))
3626 async_file_mark ();
3627
7feb7d06
PA
3628 return event_ptid;
3629}
3630
1d2736d4
PA
3631/* Kill one LWP. */
3632
3633static void
3634kill_one_lwp (pid_t pid)
d6b0e80f 3635{
ed731959
JK
3636 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3637
3638 errno = 0;
1d2736d4 3639 kill_lwp (pid, SIGKILL);
ed731959 3640 if (debug_linux_nat)
57745c90
PA
3641 {
3642 int save_errno = errno;
3643
3644 fprintf_unfiltered (gdb_stdlog,
1d2736d4 3645 "KC: kill (SIGKILL) %ld, 0, 0 (%s)\n", (long) pid,
57745c90
PA
3646 save_errno ? safe_strerror (save_errno) : "OK");
3647 }
ed731959
JK
3648
3649 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3650
d6b0e80f 3651 errno = 0;
1d2736d4 3652 ptrace (PTRACE_KILL, pid, 0, 0);
d6b0e80f 3653 if (debug_linux_nat)
57745c90
PA
3654 {
3655 int save_errno = errno;
3656
3657 fprintf_unfiltered (gdb_stdlog,
1d2736d4 3658 "KC: PTRACE_KILL %ld, 0, 0 (%s)\n", (long) pid,
57745c90
PA
3659 save_errno ? safe_strerror (save_errno) : "OK");
3660 }
d6b0e80f
AC
3661}
3662
1d2736d4
PA
3663/* Wait for an LWP to die. */
3664
3665static void
3666kill_wait_one_lwp (pid_t pid)
d6b0e80f 3667{
1d2736d4 3668 pid_t res;
d6b0e80f
AC
3669
3670 /* We must make sure that there are no pending events (delayed
3671	    SIGSTOPs, pending SIGTRAPs, etc.) so that the current
3672 program doesn't interfere with any following debugging session. */
3673
d6b0e80f
AC
3674 do
3675 {
1d2736d4
PA
3676 res = my_waitpid (pid, NULL, __WALL);
3677 if (res != (pid_t) -1)
d6b0e80f 3678 {
e85a822c
DJ
3679 if (debug_linux_nat)
3680 fprintf_unfiltered (gdb_stdlog,
1d2736d4
PA
3681 "KWC: wait %ld received unknown.\n",
3682 (long) pid);
4a6ed09b
PA
3683 /* The Linux kernel sometimes fails to kill a thread
3684	    completely after PTRACE_KILL; the thread goes from the stop
3685 point in do_fork out to the one in get_signal_to_deliver
3686 and waits again. So kill it again. */
1d2736d4 3687 kill_one_lwp (pid);
d6b0e80f
AC
3688 }
3689 }
1d2736d4
PA
3690 while (res == pid);
3691
3692 gdb_assert (res == -1 && errno == ECHILD);
3693}
3694
3695/* Callback for iterate_over_lwps. */
d6b0e80f 3696
1d2736d4
PA
3697static int
3698kill_callback (struct lwp_info *lp, void *data)
3699{
3700 kill_one_lwp (ptid_get_lwp (lp->ptid));
d6b0e80f
AC
3701 return 0;
3702}
3703
1d2736d4
PA
3704/* Callback for iterate_over_lwps. */
3705
3706static int
3707kill_wait_callback (struct lwp_info *lp, void *data)
3708{
3709 kill_wait_one_lwp (ptid_get_lwp (lp->ptid));
3710 return 0;
3711}
3712
3713/* Kill the fork children of any threads of inferior INF that are
3714 stopped at a fork event. */
3715
3716static void
3717kill_unfollowed_fork_children (struct inferior *inf)
3718{
3719 struct thread_info *thread;
3720
3721 ALL_NON_EXITED_THREADS (thread)
3722 if (thread->inf == inf)
3723 {
3724 struct target_waitstatus *ws = &thread->pending_follow;
3725
3726 if (ws->kind == TARGET_WAITKIND_FORKED
3727 || ws->kind == TARGET_WAITKIND_VFORKED)
3728 {
3729 ptid_t child_ptid = ws->value.related_pid;
3730 int child_pid = ptid_get_pid (child_ptid);
3731 int child_lwp = ptid_get_lwp (child_ptid);
1d2736d4
PA
3732
3733 kill_one_lwp (child_lwp);
3734 kill_wait_one_lwp (child_lwp);
3735
3736 /* Let the arch-specific native code know this process is
3737 gone. */
3738 linux_nat_forget_process (child_pid);
3739 }
3740 }
3741}
3742
f6ac5f3d
PA
3743void
3744linux_nat_target::kill ()
d6b0e80f 3745{
f973ed9c
DJ
3746 /* If we're stopped while forking and we haven't followed yet,
3747 kill the other task. We need to do this first because the
3748 parent will be sleeping if this is a vfork. */
1d2736d4 3749 kill_unfollowed_fork_children (current_inferior ());
f973ed9c
DJ
3750
3751 if (forks_exist_p ())
7feb7d06 3752 linux_fork_killall ();
f973ed9c
DJ
3753 else
3754 {
d90e17a7 3755 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
e0881a8e 3756
4c28f408
PA
3757 /* Stop all threads before killing them, since ptrace requires
3758	 that the thread is stopped to successfully PTRACE_KILL. */
d90e17a7 3759 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
3760 /* ... and wait until all of them have reported back that
3761 they're no longer running. */
d90e17a7 3762 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 3763
f973ed9c 3764	 /* Kill all LWPs ... */
d90e17a7 3765 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
3766
3767 /* ... and wait until we've flushed all events. */
d90e17a7 3768 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
3769 }
3770
bc1e6c81 3771 target_mourn_inferior (inferior_ptid);
d6b0e80f
AC
3772}
3773
f6ac5f3d
PA
3774void
3775linux_nat_target::mourn_inferior ()
d6b0e80f 3776{
26cb8b7c
PA
3777 int pid = ptid_get_pid (inferior_ptid);
3778
3779 purge_lwp_list (pid);
d6b0e80f 3780
f973ed9c 3781 if (! forks_exist_p ())
d90e17a7 3782 /* Normal case, no other forks available. */
f6ac5f3d 3783 inf_ptrace_target::mourn_inferior ();
f973ed9c
DJ
3784 else
3785 /* Multi-fork case. The current inferior_ptid has exited, but
3786 there are other viable forks to debug. Delete the exiting
3787 one and context-switch to the first available. */
3788 linux_fork_mourn_inferior ();
26cb8b7c
PA
3789
3790 /* Let the arch-specific native code know this process is gone. */
3791 linux_nat_forget_process (pid);
d6b0e80f
AC
3792}
3793
5b009018
PA
3794/* Convert a native/host siginfo object, into/from the siginfo in the
3795 layout of the inferiors' architecture. */
3796
3797static void
a5362b9a 3798siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5b009018
PA
3799{
3800 int done = 0;
3801
3802 if (linux_nat_siginfo_fixup != NULL)
3803 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3804
3805 /* If there was no callback, or the callback didn't do anything,
3806 then just do a straight memcpy. */
3807 if (!done)
3808 {
3809 if (direction == 1)
a5362b9a 3810 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5b009018 3811 else
a5362b9a 3812 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5b009018
PA
3813 }
3814}
3815
9b409511 3816static enum target_xfer_status
f6ac5f3d 3817linux_xfer_siginfo (enum target_object object,
4aa995e1 3818 const char *annex, gdb_byte *readbuf,
9b409511
YQ
3819 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3820 ULONGEST *xfered_len)
4aa995e1 3821{
4aa995e1 3822 int pid;
a5362b9a
TS
3823 siginfo_t siginfo;
3824 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1
PA
3825
3826 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3827 gdb_assert (readbuf || writebuf);
3828
dfd4cc63 3829 pid = ptid_get_lwp (inferior_ptid);
4aa995e1 3830 if (pid == 0)
dfd4cc63 3831 pid = ptid_get_pid (inferior_ptid);
4aa995e1
PA
3832
3833 if (offset > sizeof (siginfo))
2ed4b548 3834 return TARGET_XFER_E_IO;
4aa995e1
PA
3835
3836 errno = 0;
3837 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3838 if (errno != 0)
2ed4b548 3839 return TARGET_XFER_E_IO;
4aa995e1 3840
5b009018
PA
3841 /* When GDB is built as a 64-bit application, ptrace writes into
3842 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3843 inferior with a 64-bit GDB should look the same as debugging it
3844 with a 32-bit GDB, we need to convert it. GDB core always sees
3845 the converted layout, so any read/write will have to be done
3846 post-conversion. */
3847 siginfo_fixup (&siginfo, inf_siginfo, 0);
3848
4aa995e1
PA
3849 if (offset + len > sizeof (siginfo))
3850 len = sizeof (siginfo) - offset;
3851
3852 if (readbuf != NULL)
5b009018 3853 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
3854 else
3855 {
5b009018
PA
3856 memcpy (inf_siginfo + offset, writebuf, len);
3857
3858 /* Convert back to ptrace layout before flushing it out. */
3859 siginfo_fixup (&siginfo, inf_siginfo, 1);
3860
4aa995e1
PA
3861 errno = 0;
3862 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3863 if (errno != 0)
2ed4b548 3864 return TARGET_XFER_E_IO;
4aa995e1
PA
3865 }
3866
9b409511
YQ
3867 *xfered_len = len;
3868 return TARGET_XFER_OK;
4aa995e1
PA
3869}
3870
9b409511 3871static enum target_xfer_status
f6ac5f3d
PA
3872linux_nat_xfer_osdata (enum target_object object,
3873 const char *annex, gdb_byte *readbuf,
3874 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3875 ULONGEST *xfered_len);
3876
3877static enum target_xfer_status
3878linux_proc_xfer_spu (enum target_object object,
3879 const char *annex, gdb_byte *readbuf,
3880 const gdb_byte *writebuf,
3881 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len);
3882
3883static enum target_xfer_status
3884linux_proc_xfer_partial (enum target_object object,
3885 const char *annex, gdb_byte *readbuf,
3886 const gdb_byte *writebuf,
3887 ULONGEST offset, LONGEST len, ULONGEST *xfered_len);
3888
3889enum target_xfer_status
3890linux_nat_target::xfer_partial (enum target_object object,
3891 const char *annex, gdb_byte *readbuf,
3892 const gdb_byte *writebuf,
3893 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
d6b0e80f 3894{
9b409511 3895 enum target_xfer_status xfer;
d6b0e80f 3896
4aa995e1 3897 if (object == TARGET_OBJECT_SIGNAL_INFO)
f6ac5f3d 3898 return linux_xfer_siginfo (object, annex, readbuf, writebuf,
9b409511 3899 offset, len, xfered_len);
4aa995e1 3900
c35b1492
PA
3901 /* The target is connected but no live inferior is selected. Pass
3902 this request down to a lower stratum (e.g., the executable
3903 file). */
3904 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
9b409511 3905 return TARGET_XFER_EOF;
c35b1492 3906
f6ac5f3d
PA
3907 if (object == TARGET_OBJECT_AUXV)
3908 return memory_xfer_auxv (this, object, annex, readbuf, writebuf,
3909 offset, len, xfered_len);
3910
3911 if (object == TARGET_OBJECT_OSDATA)
3912 return linux_nat_xfer_osdata (object, annex, readbuf, writebuf,
3913 offset, len, xfered_len);
d6b0e80f 3914
f6ac5f3d
PA
3915 if (object == TARGET_OBJECT_SPU)
3916 return linux_proc_xfer_spu (object, annex, readbuf, writebuf,
3917 offset, len, xfered_len);
3918
3919 /* GDB calculates all addresses in the largest possible address
3920 width.
3921 The address width must be masked before its final use - either by
3922 linux_proc_xfer_partial or inf_ptrace_target::xfer_partial.
3923
3924 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
3925
3926 if (object == TARGET_OBJECT_MEMORY)
3927 {
3928 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
3929
3930 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
3931 offset &= ((ULONGEST) 1 << addr_bit) - 1;
3932 }
3933
3934 xfer = linux_proc_xfer_partial (object, annex, readbuf, writebuf,
3935 offset, len, xfered_len);
3936 if (xfer != TARGET_XFER_EOF)
3937 return xfer;
3938
3939 return inf_ptrace_target::xfer_partial (object, annex, readbuf, writebuf,
3940 offset, len, xfered_len);
d6b0e80f
AC
3941}
3942
f6ac5f3d
PA
3943int
3944linux_nat_target::thread_alive (ptid_t ptid)
28439f5e 3945{
4a6ed09b
PA
3946 /* As long as a PTID is in lwp list, consider it alive. */
3947 return find_lwp_pid (ptid) != NULL;
28439f5e
PA
3948}
3949
8a06aea7
PA
3950/* Implement the to_update_thread_list target method for this
3951 target. */
3952
f6ac5f3d
PA
3953void
3954linux_nat_target::update_thread_list ()
8a06aea7 3955{
a6904d5a
PA
3956 struct lwp_info *lwp;
3957
4a6ed09b
PA
3958 /* We add/delete threads from the list as clone/exit events are
3959 processed, so just try deleting exited threads still in the
3960 thread list. */
3961 delete_exited_threads ();
a6904d5a
PA
3962
3963 /* Update the processor core that each lwp/thread was last seen
3964 running on. */
3965 ALL_LWPS (lwp)
1ad3de98
PA
3966 {
3967 /* Avoid accessing /proc if the thread hasn't run since we last
3968 time we fetched the thread's core. Accessing /proc becomes
3969 noticeably expensive when we have thousands of LWPs. */
3970 if (lwp->core == -1)
3971 lwp->core = linux_common_core_of_thread (lwp->ptid);
3972 }
8a06aea7
PA
3973}
3974
f6ac5f3d
PA
3975const char *
3976linux_nat_target::pid_to_str (ptid_t ptid)
d6b0e80f
AC
3977{
3978 static char buf[64];
3979
dfd4cc63
LM
3980 if (ptid_lwp_p (ptid)
3981 && (ptid_get_pid (ptid) != ptid_get_lwp (ptid)
3982 || num_lwps (ptid_get_pid (ptid)) > 1))
d6b0e80f 3983 {
dfd4cc63 3984 snprintf (buf, sizeof (buf), "LWP %ld", ptid_get_lwp (ptid));
d6b0e80f
AC
3985 return buf;
3986 }
3987
3988 return normal_pid_to_str (ptid);
3989}
3990
f6ac5f3d
PA
3991const char *
3992linux_nat_target::thread_name (struct thread_info *thr)
4694da01 3993{
79efa585 3994 return linux_proc_tid_get_name (thr->ptid);
4694da01
TT
3995}
3996
dba24537
AC
3997/* Accepts an integer PID; Returns a string representing a file that
3998 can be opened to get the symbols for the child process. */
3999
f6ac5f3d
PA
4000char *
4001linux_nat_target::pid_to_exec_file (int pid)
dba24537 4002{
e0d86d2c 4003 return linux_proc_pid_to_exec_file (pid);
dba24537
AC
4004}
4005
a379284a
AA
4006/* Implement the to_xfer_partial target method using /proc/<pid>/mem.
4007 Because we can use a single read/write call, this can be much more
4008 efficient than banging away at PTRACE_PEEKTEXT. */
10d6c8cd 4009
9b409511 4010static enum target_xfer_status
f6ac5f3d 4011linux_proc_xfer_partial (enum target_object object,
10d6c8cd
DJ
4012 const char *annex, gdb_byte *readbuf,
4013 const gdb_byte *writebuf,
9b409511 4014 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
dba24537 4015{
10d6c8cd
DJ
4016 LONGEST ret;
4017 int fd;
dba24537
AC
4018 char filename[64];
4019
a379284a 4020 if (object != TARGET_OBJECT_MEMORY)
f486487f 4021 return TARGET_XFER_EOF;
dba24537
AC
4022
4023	 /* Don't bother for short transfers of less than three words. */
4024 if (len < 3 * sizeof (long))
9b409511 4025 return TARGET_XFER_EOF;
dba24537
AC
4026
4027 /* We could keep this file open and cache it - possibly one per
4028 thread. That requires some juggling, but is even faster. */
b67aeab0
SM
4029 xsnprintf (filename, sizeof filename, "/proc/%ld/mem",
4030 ptid_get_lwp (inferior_ptid));
a379284a
AA
4031 fd = gdb_open_cloexec (filename, ((readbuf ? O_RDONLY : O_WRONLY)
4032 | O_LARGEFILE), 0);
dba24537 4033 if (fd == -1)
9b409511 4034 return TARGET_XFER_EOF;
dba24537 4035
a379284a
AA
4036 /* Use pread64/pwrite64 if available, since they save a syscall and can
4037 handle 64-bit offsets even on 32-bit platforms (for instance, SPARC
4038 debugging a SPARC64 application). */
dba24537 4039#ifdef HAVE_PREAD64
a379284a
AA
4040 ret = (readbuf ? pread64 (fd, readbuf, len, offset)
4041 : pwrite64 (fd, writebuf, len, offset));
dba24537 4042#else
a379284a
AA
4043 ret = lseek (fd, offset, SEEK_SET);
4044 if (ret != -1)
4045 ret = (readbuf ? read (fd, readbuf, len)
4046 : write (fd, writebuf, len));
dba24537 4047#endif
dba24537
AC
4048
4049 close (fd);
9b409511 4050
a379284a 4051 if (ret == -1 || ret == 0)
9b409511
YQ
4052 return TARGET_XFER_EOF;
4053 else
4054 {
4055 *xfered_len = ret;
4056 return TARGET_XFER_OK;
4057 }
dba24537
AC
4058}
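
/* Minimal standalone sketch (an assumption added for illustration,
   not part of this file): reading another process's memory through
   /proc/<pid>/mem with a single pread, the same approach used by
   linux_proc_xfer_partial above.  The target generally needs to be
   ptrace-stopped by the caller for the read to succeed.  */

#if 0   /* Illustrative sketch only; never compiled.  */
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static ssize_t
read_inferior_memory (pid_t pid, unsigned long addr, void *buf, size_t len)
{
  char path[64];
  int fd;
  ssize_t n;

  snprintf (path, sizeof (path), "/proc/%ld/mem", (long) pid);
  fd = open (path, O_RDONLY);
  if (fd == -1)
    return -1;

  /* pread takes the offset in the same call, so no lseek is needed.  */
  n = pread (fd, buf, len, (off_t) addr);

  close (fd);
  return n;
}
#endif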
4059
efcbbd14
UW
4060
4061/* Enumerate spufs IDs for process PID. */
4062static LONGEST
b55e14c7 4063spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, ULONGEST len)
efcbbd14 4064{
f5656ead 4065 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
efcbbd14
UW
4066 LONGEST pos = 0;
4067 LONGEST written = 0;
4068 char path[128];
4069 DIR *dir;
4070 struct dirent *entry;
4071
4072 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4073 dir = opendir (path);
4074 if (!dir)
4075 return -1;
4076
4077 rewinddir (dir);
4078 while ((entry = readdir (dir)) != NULL)
4079 {
4080 struct stat st;
4081 struct statfs stfs;
4082 int fd;
4083
4084 fd = atoi (entry->d_name);
4085 if (!fd)
4086 continue;
4087
4088 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4089 if (stat (path, &st) != 0)
4090 continue;
4091 if (!S_ISDIR (st.st_mode))
4092 continue;
4093
4094 if (statfs (path, &stfs) != 0)
4095 continue;
4096 if (stfs.f_type != SPUFS_MAGIC)
4097 continue;
4098
4099 if (pos >= offset && pos + 4 <= offset + len)
4100 {
4101 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4102 written += 4;
4103 }
4104 pos += 4;
4105 }
4106
4107 closedir (dir);
4108 return written;
4109}
4110
4111/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4112 object type, using the /proc file system. */
9b409511
YQ
4113
4114static enum target_xfer_status
f6ac5f3d 4115linux_proc_xfer_spu (enum target_object object,
efcbbd14
UW
4116 const char *annex, gdb_byte *readbuf,
4117 const gdb_byte *writebuf,
9b409511 4118 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
efcbbd14
UW
4119{
4120 char buf[128];
4121 int fd = 0;
4122 int ret = -1;
b67aeab0 4123 int pid = ptid_get_lwp (inferior_ptid);
efcbbd14
UW
4124
4125 if (!annex)
4126 {
4127 if (!readbuf)
2ed4b548 4128 return TARGET_XFER_E_IO;
efcbbd14 4129 else
9b409511
YQ
4130 {
4131 LONGEST l = spu_enumerate_spu_ids (pid, readbuf, offset, len);
4132
4133 if (l < 0)
4134 return TARGET_XFER_E_IO;
4135 else if (l == 0)
4136 return TARGET_XFER_EOF;
4137 else
4138 {
4139 *xfered_len = (ULONGEST) l;
4140 return TARGET_XFER_OK;
4141 }
4142 }
efcbbd14
UW
4143 }
4144
4145 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
614c279d 4146 fd = gdb_open_cloexec (buf, writebuf? O_WRONLY : O_RDONLY, 0);
efcbbd14 4147 if (fd <= 0)
2ed4b548 4148 return TARGET_XFER_E_IO;
efcbbd14
UW
4149
4150 if (offset != 0
4151 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4152 {
4153 close (fd);
9b409511 4154 return TARGET_XFER_EOF;
efcbbd14
UW
4155 }
4156
4157 if (writebuf)
4158 ret = write (fd, writebuf, (size_t) len);
4159 else if (readbuf)
4160 ret = read (fd, readbuf, (size_t) len);
4161
4162 close (fd);
9b409511
YQ
4163
4164 if (ret < 0)
4165 return TARGET_XFER_E_IO;
4166 else if (ret == 0)
4167 return TARGET_XFER_EOF;
4168 else
4169 {
4170 *xfered_len = (ULONGEST) ret;
4171 return TARGET_XFER_OK;
4172 }
efcbbd14
UW
4173}
4174
4175
dba24537
AC
4176/* Parse LINE as a signal set and add its set bits to SIGS. */
4177
4178static void
4179add_line_to_sigset (const char *line, sigset_t *sigs)
4180{
4181 int len = strlen (line) - 1;
4182 const char *p;
4183 int signum;
4184
4185 if (line[len] != '\n')
8a3fe4f8 4186 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4187
4188 p = line;
4189 signum = len * 4;
4190 while (len-- > 0)
4191 {
4192 int digit;
4193
4194 if (*p >= '0' && *p <= '9')
4195 digit = *p - '0';
4196 else if (*p >= 'a' && *p <= 'f')
4197 digit = *p - 'a' + 10;
4198 else
8a3fe4f8 4199 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4200
4201 signum -= 4;
4202
4203 if (digit & 1)
4204 sigaddset (sigs, signum + 1);
4205 if (digit & 2)
4206 sigaddset (sigs, signum + 2);
4207 if (digit & 4)
4208 sigaddset (sigs, signum + 3);
4209 if (digit & 8)
4210 sigaddset (sigs, signum + 4);
4211
4212 p++;
4213 }
4214}
4215
4216/* Find process PID's pending signals from /proc/pid/status and set
4217 SIGS to match. */
4218
4219void
3e43a32a
MS
4220linux_proc_pending_signals (int pid, sigset_t *pending,
4221 sigset_t *blocked, sigset_t *ignored)
dba24537 4222{
d8d2a3ee 4223 char buffer[PATH_MAX], fname[PATH_MAX];
dba24537
AC
4224
4225 sigemptyset (pending);
4226 sigemptyset (blocked);
4227 sigemptyset (ignored);
cde33bf1 4228 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
d419f42d 4229 gdb_file_up procfile = gdb_fopen_cloexec (fname, "r");
dba24537 4230 if (procfile == NULL)
8a3fe4f8 4231 error (_("Could not open %s"), fname);
dba24537 4232
d419f42d 4233 while (fgets (buffer, PATH_MAX, procfile.get ()) != NULL)
dba24537
AC
4234 {
4235 /* Normal queued signals are on the SigPnd line in the status
4236 file. However, 2.6 kernels also have a "shared" pending
4237 queue for delivering signals to a thread group, so check for
4238 a ShdPnd line also.
4239
4240 Unfortunately some Red Hat kernels include the shared pending
4241 queue but not the ShdPnd status field. */
4242
61012eef 4243 if (startswith (buffer, "SigPnd:\t"))
dba24537 4244 add_line_to_sigset (buffer + 8, pending);
61012eef 4245 else if (startswith (buffer, "ShdPnd:\t"))
dba24537 4246 add_line_to_sigset (buffer + 8, pending);
61012eef 4247 else if (startswith (buffer, "SigBlk:\t"))
dba24537 4248 add_line_to_sigset (buffer + 8, blocked);
61012eef 4249 else if (startswith (buffer, "SigIgn:\t"))
dba24537
AC
4250 add_line_to_sigset (buffer + 8, ignored);
4251 }
dba24537
AC
4252}
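
/* Usage sketch (a hypothetical caller, not taken from this file): the
   sets filled in above can be inspected with sigismember.  */

#if 0   /* Illustrative sketch only; never compiled.  */
#include <signal.h>

static void
show_pending_sigint (int pid)
{
  sigset_t pending, blocked, ignored;

  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
  if (sigismember (&pending, SIGINT))
    fprintf_unfiltered (gdb_stdlog, "SIGINT is pending\n");
}
#endif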
4253
9b409511 4254static enum target_xfer_status
f6ac5f3d 4255linux_nat_xfer_osdata (enum target_object object,
e0881a8e 4256 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4257 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4258 ULONGEST *xfered_len)
07e059b5 4259{
07e059b5
VP
4260 gdb_assert (object == TARGET_OBJECT_OSDATA);
4261
9b409511
YQ
4262 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4263 if (*xfered_len == 0)
4264 return TARGET_XFER_EOF;
4265 else
4266 return TARGET_XFER_OK;
07e059b5
VP
4267}
4268
5808517f
YQ
4269static void
4270cleanup_target_stop (void *arg)
4271{
4272 ptid_t *ptid = (ptid_t *) arg;
4273
4274 gdb_assert (arg != NULL);
4275
4276 /* Unpause all */
049a8570 4277 target_continue_no_signal (*ptid);
5808517f
YQ
4278}
4279
f6ac5f3d
PA
4280std::vector<static_tracepoint_marker>
4281linux_nat_target::static_tracepoint_markers_by_strid (const char *strid)
5808517f
YQ
4282{
4283 char s[IPA_CMD_BUF_SIZE];
4284 struct cleanup *old_chain;
4285 int pid = ptid_get_pid (inferior_ptid);
5d9310c4 4286 std::vector<static_tracepoint_marker> markers;
256642e8 4287 const char *p = s;
5808517f 4288 ptid_t ptid = ptid_build (pid, 0, 0);
5d9310c4 4289 static_tracepoint_marker marker;
5808517f
YQ
4290
4291 /* Pause all */
4292 target_stop (ptid);
4293
4294 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4295 s[sizeof ("qTfSTM")] = 0;
4296
42476b70 4297 agent_run_command (pid, s, strlen (s) + 1);
5808517f 4298
5d9310c4 4299 old_chain = make_cleanup (cleanup_target_stop, &ptid);
5808517f
YQ
4300
4301 while (*p++ == 'm')
4302 {
5808517f
YQ
4303 do
4304 {
5d9310c4 4305 parse_static_tracepoint_marker_definition (p, &p, &marker);
5808517f 4306
5d9310c4
SM
4307 if (strid == NULL || marker.str_id == strid)
4308 markers.push_back (std::move (marker));
5808517f
YQ
4309 }
4310 while (*p++ == ','); /* comma-separated list */
4311
4312 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4313 s[sizeof ("qTsSTM")] = 0;
42476b70 4314 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4315 p = s;
4316 }
4317
4318 do_cleanups (old_chain);
4319
4320 return markers;
4321}
4322
b84876c2
PA
4323/* target_is_async_p implementation. */
4324
f6ac5f3d
PA
4325int
4326linux_nat_target::is_async_p ()
b84876c2 4327{
198297aa 4328 return linux_is_async_p ();
b84876c2
PA
4329}
4330
4331/* target_can_async_p implementation. */
4332
f6ac5f3d
PA
4333int
4334linux_nat_target::can_async_p ()
b84876c2 4335{
fde1b17d
SM
4336 /* We're always async, unless the user explicitly prevented it with the
4337 "maint set target-async" command. */
3dd5b83d 4338 return target_async_permitted;
b84876c2
PA
4339}
4340
f6ac5f3d
PA
4341int
4342linux_nat_target::supports_non_stop ()
9908b566
VP
4343{
4344 return 1;
4345}
4346
fbea99ea
PA
4347/* to_always_non_stop_p implementation. */
4348
f6ac5f3d
PA
4349int
4350linux_nat_target::always_non_stop_p ()
fbea99ea 4351{
f12899e9 4352 return 1;
fbea99ea
PA
4353}
4354
d90e17a7
PA
4355/* True if we want to support multi-process. To be removed when GDB
4356 supports multi-exec. */
4357
2277426b 4358int linux_multi_process = 1;
d90e17a7 4359
f6ac5f3d
PA
4360int
4361linux_nat_target::supports_multi_process ()
d90e17a7
PA
4362{
4363 return linux_multi_process;
4364}
4365
f6ac5f3d
PA
4366int
4367linux_nat_target::supports_disable_randomization ()
03583c20
UW
4368{
4369#ifdef HAVE_PERSONALITY
4370 return 1;
4371#else
4372 return 0;
4373#endif
4374}
4375
7feb7d06
PA
4376/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4377 so we notice when any child changes state, and notify the
4378 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4379 above to wait for the arrival of a SIGCHLD. */
4380
b84876c2 4381static void
7feb7d06 4382sigchld_handler (int signo)
b84876c2 4383{
7feb7d06
PA
4384 int old_errno = errno;
4385
01124a23
DE
4386 if (debug_linux_nat)
4387 ui_file_write_async_safe (gdb_stdlog,
4388 "sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06
PA
4389
4390 if (signo == SIGCHLD
4391 && linux_nat_event_pipe[0] != -1)
4392 async_file_mark (); /* Let the event loop know that there are
4393 events to handle. */
4394
4395 errno = old_errno;
4396}
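
/* Background note (an illustrative sketch, not part of this file):
   the handler above follows the classic "self-pipe trick".  Only
   async-signal-safe work happens in signal context -- marking the
   event pipe via async_file_mark and restoring errno -- and the event
   loop later notices the pipe became readable and runs
   handle_target_event from normal context.  A generic version of the
   pattern looks roughly like this:  */

#if 0   /* Illustrative sketch only; never compiled.  */
#include <errno.h>
#include <unistd.h>

static int event_pipe[2];

static void
generic_sigchld_handler (int signo)
{
  int old_errno = errno;

  /* write is async-signal-safe; a single byte is enough to make the
     read end readable and wake up the event loop.  */
  if (event_pipe[1] != -1)
    (void) write (event_pipe[1], "+", 1);

  errno = old_errno;
}
#endif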
4397
4398/* Callback registered with the target events file descriptor. */
4399
4400static void
4401handle_target_event (int error, gdb_client_data client_data)
4402{
6a3753b3 4403 inferior_event_handler (INF_REG_EVENT, NULL);
7feb7d06
PA
4404}
4405
4406/* Create/destroy the target events pipe. Returns previous state. */
4407
4408static int
4409linux_async_pipe (int enable)
4410{
198297aa 4411 int previous = linux_is_async_p ();
7feb7d06
PA
4412
4413 if (previous != enable)
4414 {
4415 sigset_t prev_mask;
4416
12696c10
PA
4417 /* Block child signals while we create/destroy the pipe, as
4418 their handler writes to it. */
7feb7d06
PA
4419 block_child_signals (&prev_mask);
4420
4421 if (enable)
4422 {
614c279d 4423 if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
7feb7d06
PA
4424 internal_error (__FILE__, __LINE__,
4425 "creating event pipe failed.");
4426
4427 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4428 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4429 }
4430 else
4431 {
4432 close (linux_nat_event_pipe[0]);
4433 close (linux_nat_event_pipe[1]);
4434 linux_nat_event_pipe[0] = -1;
4435 linux_nat_event_pipe[1] = -1;
4436 }
4437
4438 restore_child_signals_mask (&prev_mask);
4439 }
4440
4441 return previous;
b84876c2
PA
4442}
4443
4444/* target_async implementation. */
4445
f6ac5f3d
PA
4446void
4447linux_nat_target::async (int enable)
b84876c2 4448{
6a3753b3 4449 if (enable)
b84876c2 4450 {
7feb7d06
PA
4451 if (!linux_async_pipe (1))
4452 {
4453 add_file_handler (linux_nat_event_pipe[0],
4454 handle_target_event, NULL);
4455 /* There may be pending events to handle. Tell the event loop
4456 to poll them. */
4457 async_file_mark ();
4458 }
b84876c2
PA
4459 }
4460 else
4461 {
b84876c2 4462 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 4463 linux_async_pipe (0);
b84876c2
PA
4464 }
4465 return;
4466}
4467
a493e3e2 4468/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
252fbfc8
PA
4469 event came out. */
4470
4c28f408 4471static int
252fbfc8 4472linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4c28f408 4473{
d90e17a7 4474 if (!lwp->stopped)
252fbfc8 4475 {
d90e17a7
PA
4476 if (debug_linux_nat)
4477 fprintf_unfiltered (gdb_stdlog,
4478 "LNSL: running -> suspending %s\n",
4479 target_pid_to_str (lwp->ptid));
252fbfc8 4480
252fbfc8 4481
25289eb2
PA
4482 if (lwp->last_resume_kind == resume_stop)
4483 {
4484 if (debug_linux_nat)
4485 fprintf_unfiltered (gdb_stdlog,
4486 "linux-nat: already stopping LWP %ld at "
4487 "GDB's request\n",
4488 ptid_get_lwp (lwp->ptid));
4489 return 0;
4490 }
252fbfc8 4491
25289eb2
PA
4492 stop_callback (lwp, NULL);
4493 lwp->last_resume_kind = resume_stop;
d90e17a7
PA
4494 }
4495 else
4496 {
4497 /* Already known to be stopped; do nothing. */
252fbfc8 4498
d90e17a7
PA
4499 if (debug_linux_nat)
4500 {
e09875d4 4501 if (find_thread_ptid (lwp->ptid)->stop_requested)
3e43a32a
MS
4502 fprintf_unfiltered (gdb_stdlog,
4503 "LNSL: already stopped/stop_requested %s\n",
d90e17a7
PA
4504 target_pid_to_str (lwp->ptid));
4505 else
3e43a32a
MS
4506 fprintf_unfiltered (gdb_stdlog,
4507 "LNSL: already stopped/no "
4508 "stop_requested yet %s\n",
d90e17a7 4509 target_pid_to_str (lwp->ptid));
252fbfc8
PA
4510 }
4511 }
4c28f408
PA
4512 return 0;
4513}
4514
f6ac5f3d
PA
4515void
4516linux_nat_target::stop (ptid_t ptid)
4c28f408 4517{
bfedc46a
PA
4518 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4519}
4520
f6ac5f3d
PA
4521void
4522linux_nat_target::close ()
d90e17a7
PA
4523{
4524 /* Unregister from the event loop. */
f6ac5f3d
PA
4525 if (is_async_p ())
4526 async (0);
d90e17a7 4527
f6ac5f3d 4528 inf_ptrace_target::close ();
d90e17a7
PA
4529}
4530
c0694254
PA
4531/* When requests are passed down from the linux-nat layer to the
4532 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4533 used. The address space pointer is stored in the inferior object,
4534 but the common code that is passed such ptid can't tell whether
4535 lwpid is a "main" process id or not (it assumes so). We reverse
4536 look up the "main" process id from the lwp here. */
4537
f6ac5f3d
PA
4538struct address_space *
4539linux_nat_target::thread_address_space (ptid_t ptid)
c0694254
PA
4540{
4541 struct lwp_info *lwp;
4542 struct inferior *inf;
4543 int pid;
4544
dfd4cc63 4545 if (ptid_get_lwp (ptid) == 0)
c0694254
PA
4546 {
4547 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4548 tgid. */
4549 lwp = find_lwp_pid (ptid);
dfd4cc63 4550 pid = ptid_get_pid (lwp->ptid);
c0694254
PA
4551 }
4552 else
4553 {
4554 /* A (pid,lwpid,0) ptid. */
dfd4cc63 4555 pid = ptid_get_pid (ptid);
c0694254
PA
4556 }
4557
4558 inf = find_inferior_pid (pid);
4559 gdb_assert (inf != NULL);
4560 return inf->aspace;
4561}
4562
dc146f7c
VP
4563/* Return the cached value of the processor core for thread PTID. */
4564
f6ac5f3d
PA
4565int
4566linux_nat_target::core_of_thread (ptid_t ptid)
dc146f7c
VP
4567{
4568 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 4569
dc146f7c
VP
4570 if (info)
4571 return info->core;
4572 return -1;
4573}
4574
7a6a1731
GB
4575/* Implementation of to_filesystem_is_local. */
4576
f6ac5f3d
PA
4577int
4578linux_nat_target::filesystem_is_local ()
7a6a1731
GB
4579{
4580 struct inferior *inf = current_inferior ();
4581
4582 if (inf->fake_pid_p || inf->pid == 0)
4583 return 1;
4584
4585 return linux_ns_same (inf->pid, LINUX_NS_MNT);
4586}
4587
4588/* Convert the INF argument passed to a to_fileio_* method
4589 to a process ID suitable for passing to its corresponding
4590 linux_mntns_* function. If INF is non-NULL then the
4591 caller is requesting the filesystem seen by INF. If INF
4592 is NULL then the caller is requesting the filesystem seen
4593	 by GDB itself.  We fall back to GDB's filesystem in the case
4594 that INF is non-NULL but its PID is unknown. */
4595
4596static pid_t
4597linux_nat_fileio_pid_of (struct inferior *inf)
4598{
4599 if (inf == NULL || inf->fake_pid_p || inf->pid == 0)
4600 return getpid ();
4601 else
4602 return inf->pid;
4603}
4604
4605/* Implementation of to_fileio_open. */
4606
f6ac5f3d
PA
4607int
4608linux_nat_target::fileio_open (struct inferior *inf, const char *filename,
4609 int flags, int mode, int warn_if_slow,
4610 int *target_errno)
7a6a1731
GB
4611{
4612 int nat_flags;
4613 mode_t nat_mode;
4614 int fd;
4615
4616 if (fileio_to_host_openflags (flags, &nat_flags) == -1
4617 || fileio_to_host_mode (mode, &nat_mode) == -1)
4618 {
4619 *target_errno = FILEIO_EINVAL;
4620 return -1;
4621 }
4622
4623 fd = linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf),
4624 filename, nat_flags, nat_mode);
4625 if (fd == -1)
4626 *target_errno = host_to_fileio_error (errno);
4627
4628 return fd;
4629}
4630
4631/* Implementation of to_fileio_readlink. */
4632
f6ac5f3d
PA
4633gdb::optional<std::string>
4634linux_nat_target::fileio_readlink (struct inferior *inf, const char *filename,
4635 int *target_errno)
7a6a1731
GB
4636{
4637 char buf[PATH_MAX];
4638 int len;
7a6a1731
GB
4639
4640 len = linux_mntns_readlink (linux_nat_fileio_pid_of (inf),
4641 filename, buf, sizeof (buf));
4642 if (len < 0)
4643 {
4644 *target_errno = host_to_fileio_error (errno);
e0d3522b 4645 return {};
7a6a1731
GB
4646 }
4647
e0d3522b 4648 return std::string (buf, len);
7a6a1731
GB
4649}
4650
4651/* Implementation of to_fileio_unlink. */
4652
f6ac5f3d
PA
4653int
4654linux_nat_target::fileio_unlink (struct inferior *inf, const char *filename,
4655 int *target_errno)
7a6a1731
GB
4656{
4657 int ret;
4658
4659 ret = linux_mntns_unlink (linux_nat_fileio_pid_of (inf),
4660 filename);
4661 if (ret == -1)
4662 *target_errno = host_to_fileio_error (errno);
4663
4664 return ret;
4665}
4666
aa01bd36
PA
4667/* Implementation of the to_thread_events method. */
4668
f6ac5f3d
PA
4669void
4670linux_nat_target::thread_events (int enable)
aa01bd36
PA
4671{
4672 report_thread_events = enable;
4673}
4674
f6ac5f3d
PA
4675linux_nat_target::linux_nat_target ()
4676{
f973ed9c
DJ
4677 /* We don't change the stratum; this target will sit at
4678	 process_stratum and thread_db will sit at thread_stratum. This
4679 is a little strange, since this is a multi-threaded-capable
4680 target, but we want to be on the stack below thread_db, and we
4681 also want to be used for single-threaded processes. */
f973ed9c
DJ
4682}
4683
9f0bdab8
DJ
4684/* Register a method to call whenever a new thread is attached. */
4685void
7b50312a
PA
4686linux_nat_set_new_thread (struct target_ops *t,
4687 void (*new_thread) (struct lwp_info *))
9f0bdab8
DJ
4688{
4689 /* Save the pointer. We only support a single registered instance
4690 of the GNU/Linux native target, so we do not need to map this to
4691 T. */
4692 linux_nat_new_thread = new_thread;
4693}
4694
466eecee
SM
4695/* Register a method to call whenever a thread's arch-specific lwp info
   is deleted. */
4696void
4697linux_nat_set_delete_thread (struct target_ops *t,
4698 void (*delete_thread) (struct arch_lwp_info *))
4699{
4700 /* Save the pointer. We only support a single registered instance
4701 of the GNU/Linux native target, so we do not need to map this to
4702 T. */
4703 linux_nat_delete_thread = delete_thread;
4704}
4705
26cb8b7c
PA
4706/* See declaration in linux-nat.h. */
4707
4708void
4709linux_nat_set_new_fork (struct target_ops *t,
4710 linux_nat_new_fork_ftype *new_fork)
4711{
4712 /* Save the pointer. */
4713 linux_nat_new_fork = new_fork;
4714}
4715
4716/* See declaration in linux-nat.h. */
4717
4718void
4719linux_nat_set_forget_process (struct target_ops *t,
4720 linux_nat_forget_process_ftype *fn)
4721{
4722 /* Save the pointer. */
4723 linux_nat_forget_process_hook = fn;
4724}
4725
4726/* See declaration in linux-nat.h. */
4727
4728void
4729linux_nat_forget_process (pid_t pid)
4730{
4731 if (linux_nat_forget_process_hook != NULL)
4732 linux_nat_forget_process_hook (pid);
4733}
4734
5b009018
PA
4735/* Register a method that converts a siginfo object between the layout
4736 that ptrace returns, and the layout in the architecture of the
4737 inferior. */
4738void
4739linux_nat_set_siginfo_fixup (struct target_ops *t,
a5362b9a 4740 int (*siginfo_fixup) (siginfo_t *,
5b009018
PA
4741 gdb_byte *,
4742 int))
4743{
4744 /* Save the pointer. */
4745 linux_nat_siginfo_fixup = siginfo_fixup;
4746}
4747
7b50312a
PA
4748/* Register a method to call prior to resuming a thread. */
4749
4750void
4751linux_nat_set_prepare_to_resume (struct target_ops *t,
4752 void (*prepare_to_resume) (struct lwp_info *))
4753{
4754 /* Save the pointer. */
4755 linux_nat_prepare_to_resume = prepare_to_resume;
4756}
4757
f865ee35
JK
4758/* See linux-nat.h. */
4759
4760int
4761linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
9f0bdab8 4762{
da559b09 4763 int pid;
9f0bdab8 4764
dfd4cc63 4765 pid = ptid_get_lwp (ptid);
da559b09 4766 if (pid == 0)
dfd4cc63 4767 pid = ptid_get_pid (ptid);
f865ee35 4768
da559b09
JK
4769 errno = 0;
4770 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
4771 if (errno != 0)
4772 {
4773 memset (siginfo, 0, sizeof (*siginfo));
4774 return 0;
4775 }
f865ee35 4776 return 1;
9f0bdab8
DJ
4777}
4778
7b669087
GB
4779/* See nat/linux-nat.h. */
4780
4781ptid_t
4782current_lwp_ptid (void)
4783{
4784 gdb_assert (ptid_lwp_p (inferior_ptid));
4785 return inferior_ptid;
4786}
4787
d6b0e80f
AC
4788void
4789_initialize_linux_nat (void)
4790{
ccce17b0
YQ
4791 add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
4792 &debug_linux_nat, _("\
b84876c2
PA
4793Set debugging of GNU/Linux lwp module."), _("\
4794Show debugging of GNU/Linux lwp module."), _("\
4795Enables printf debugging output."),
ccce17b0
YQ
4796 NULL,
4797 show_debug_linux_nat,
4798 &setdebuglist, &showdebuglist);
b84876c2 4799
7a6a1731
GB
4800 add_setshow_boolean_cmd ("linux-namespaces", class_maintenance,
4801 &debug_linux_namespaces, _("\
4802Set debugging of GNU/Linux namespaces module."), _("\
4803Show debugging of GNU/Linux namespaces module."), _("\
4804Enables printf debugging output."),
4805 NULL,
4806 NULL,
4807 &setdebuglist, &showdebuglist);
4808
b84876c2 4809 /* Save this mask as the default. */
d6b0e80f
AC
4810 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4811
7feb7d06
PA
4812 /* Install a SIGCHLD handler. */
4813 sigchld_action.sa_handler = sigchld_handler;
4814 sigemptyset (&sigchld_action.sa_mask);
4815 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
4816
4817 /* Make it the default. */
7feb7d06 4818 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
4819
4820 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4821 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
4822 sigdelset (&suspend_mask, SIGCHLD);
4823
7feb7d06 4824 sigemptyset (&blocked_mask);
774113b0
PA
4825
4826 lwp_lwpid_htab_create ();
d6b0e80f
AC
4827}
4828\f
4829
4830/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4831 the GNU/Linux Threads library and therefore doesn't really belong
4832 here. */
4833
d6b0e80f
AC
4834/* Return the set of signals used by the threads library in *SET. */
4835
4836void
4837lin_thread_get_thread_signals (sigset_t *set)
4838{
d6b0e80f
AC
4839 sigemptyset (set);
4840
4a6ed09b
PA
4841 /* NPTL reserves the first two RT signals, but does not provide any
4842 way for the debugger to query the signal numbers - fortunately
4843 they don't change. */
4844 sigaddset (set, __SIGRTMIN);
4845 sigaddset (set, __SIGRTMIN + 1);
d6b0e80f 4846}