/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2016 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "infrun.h"
#include "target.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <unistd.h>
#include <sys/syscall.h>
#include "nat/gdb_ptrace.h"
#include "linux-nat.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-child.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/procfs.h>		/* for elf_gregset etc.  */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include <sys/stat.h>		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include "xml-support.h"
#include <sys/vfs.h>
#include "solib.h"
#include "nat/linux-osdata.h"
#include "linux-tdep.h"
#include "symfile.h"
#include "agent.h"
#include "tracepoint.h"
#include "buffer.h"
#include "target-descriptions.h"
#include "filestuff.h"
#include "objfiles.h"
#include "nat/linux-namespaces.h"
#include "fileio.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

/* This comment documents high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good:

- If the thread group leader exits while other threads in the thread
  group still exist, waitpid(TGID, ...) hangs.  That waitpid won't
  return an exit status until the other threads in the group are
  reaped.

- When a non-leader thread execs, that thread just vanishes without
  reporting an exit (so we'd hang if we waited for it explicitly in
  that case).  The exec event is instead reported to the TGID pid.

The solution is to always use -1 and WNOHANG, together with
sigsuspend.

First, we use non-blocking waitpid to check for events.  If nothing is
found, we use sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives,
it means something happened to a child process.  As soon as we know
there's an event, we get back to calling nonblocking waitpid.

Note that SIGCHLD should be blocked between waitpid and sigsuspend
calls, so that we don't miss a signal.  If SIGCHLD arrives in between,
when it's blocked, the signal becomes pending and sigsuspend
immediately notices it and returns.
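
As a rough sketch (illustration only -- the real code below also has
to handle TARGET_WNOHANG, stop requests, etc.), the core of the
blocking wait is:

    sigset_t prev_mask;

    block_child_signals (&prev_mask);
    for (;;)
      {
        int status;
        pid_t pid = my_waitpid (-1, &status, __WALL | WNOHANG);

        if (pid > 0)
          break;

        sigsuspend (&suspend_mask);
      }
    restore_child_signals_mask (&prev_mask);

block_child_signals blocks SIGCHLD and suspend_mask leaves it
unblocked, so if SIGCHLD is already pending, sigsuspend returns
immediately instead of losing the wakeup.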

Waiting for events in async mode (TARGET_WNOHANG)
=================================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as a waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well as
on other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop if there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.
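
In this file that works roughly as follows (simplified sketch; the
real SIGCHLD handler and the event-loop registration live further
down in this file):

    static void
    sigchld_handler (int signo)
    {
      if (signo == SIGCHLD && linux_nat_event_pipe[0] != -1)
        async_file_mark ();
    }

    add_file_handler (linux_nat_event_pipe[0],
                      handle_target_event, NULL);

async_file_mark just writes a byte into the pipe's write end, which
is about the only thing that is safe to do from a signal handler.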

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered,
so that we can intercept it.  SIGSTOP's advantage is that it can not be
blocked.  A disadvantage is that it is not a real-time signal, so it can only
be queued once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
use them, because they have special behavior when the signal is generated -
not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread we
tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.
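
Concretely (simplified; see stop_callback and stop_wait_callback in
this file), stopping an LWP and then swallowing the SIGSTOP looks
like:

    kill_lwp (ptid_get_lwp (lp->ptid), SIGSTOP);
    my_waitpid (ptid_get_lwp (lp->ptid), &status, __WALL);
    ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);

The final PTRACE_CONT resumes the LWP without passing the SIGSTOP
along, so the program never observes the stop.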

Exec events
===========

The case of a thread group (process) with 3 or more threads, where a
thread other than the leader execs, is worth detailing:

On an exec, the Linux kernel destroys all threads except the execing
one in the thread group, and resets the execing thread's tid to the
tgid.  No exit notification is sent for the execing thread -- from the
ptracer's perspective, it appears as though the execing thread just
vanishes.  Until we reap all other threads except the leader and the
execing thread, the leader will be zombie, and the execing thread will
be in `D (disc sleep)' state.  As soon as all other threads are
reaped, the execing thread changes its tid to the tgid, and the
previous (zombie) leader vanishes, giving place to the "new"
leader.  */

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Does the current host support PTRACE_GETREGSET?  */
enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;

/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (struct lwp_info *);

/* The method to call, if any, when a new fork is attached.  */
static linux_nat_new_fork_ftype *linux_nat_new_fork;

/* The method to call, if any, when a process is no longer
   attached.  */
static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;

/* Hook to call prior to resuming a thread.  */
static void (*linux_nat_prepare_to_resume) (struct lwp_info *);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (siginfo_t *,
                                       gdb_byte *,
                                       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static target_xfer_partial_ftype *super_xfer_partial;

/* The saved to_close method, inherited from inf-ptrace.c.
   Called by our to_close.  */
static void (*super_close) (struct target_ops *);

static unsigned int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
                    value);
}

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Async mode support.  */

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define linux_is_async_p() (linux_nat_event_pipe[0] != -1)

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in event
   pipe, so that the select/poll in the event-loop realizes we have
   something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long as we
     end up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}

static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp, void *data);
static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static void delete_lwp (ptid_t ptid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);

static int lwp_status_pending_p (struct lwp_info *lp);

static int check_stopped_by_breakpoint (struct lwp_info *lp);
static int sigtrap_is_event (int status);
static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;

\f
/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return lwp->ptid;
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

\f
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

/* Return the ptrace options that we want to try to enable.  */

static int
linux_nat_ptrace_options (int attached)
{
  int options = 0;

  if (!attached)
    options |= PTRACE_O_EXITKILL;

  options |= (PTRACE_O_TRACESYSGOOD
              | PTRACE_O_TRACEVFORKDONE
              | PTRACE_O_TRACEVFORK
              | PTRACE_O_TRACEFORK
              | PTRACE_O_TRACEEXEC);

  return options;
}

/* Initialize ptrace warnings and check for supported ptrace
   features given PID.

   ATTACHED should be nonzero iff we attached to the inferior.  */

static void
linux_init_ptrace (pid_t pid, int attached)
{
  int options = linux_nat_ptrace_options (attached);

  linux_enable_event_reporting (pid, options);
  linux_ptrace_init_warnings ();
}

static void
linux_child_post_attach (struct target_ops *self, int pid)
{
  linux_init_ptrace (pid, 1);
}

static void
linux_child_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  linux_init_ptrace (ptid_get_pid (ptid), 0);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;
  struct lwp_info *lp;

  for (lp = lwp_list; lp; lp = lp->next)
    if (ptid_get_pid (lp->ptid) == pid)
      count++;

  return count;
}

/* Call delete_lwp with prototype compatible for make_cleanup.  */

static void
delete_lwp_cleanup (void *lp_voidp)
{
  struct lwp_info *lp = (struct lwp_info *) lp_voidp;

  delete_lwp (lp->ptid);
}

/* Target hook for follow_fork.  On entry inferior_ptid must be the
   ptid of the followed inferior.  At return, inferior_ptid will be
   unchanged.  */

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child,
                         int detach_fork)
{
  if (!follow_child)
    {
      struct lwp_info *child_lp = NULL;
      int status = W_STOPCODE (0);
      struct cleanup *old_chain;
      int has_vforked;
      ptid_t parent_ptid, child_ptid;
      int parent_pid, child_pid;

      has_vforked = (inferior_thread ()->pending_follow.kind
                     == TARGET_WAITKIND_VFORKED);
      parent_ptid = inferior_ptid;
      child_ptid = inferior_thread ()->pending_follow.value.related_pid;
      parent_pid = ptid_get_lwp (parent_ptid);
      child_pid = ptid_get_lwp (child_ptid);

      /* We're already attached to the parent, by default.  */
      old_chain = save_inferior_ptid ();
      inferior_ptid = child_ptid;
      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Detach new forked process?  */
      if (detach_fork)
        {
          make_cleanup (delete_lwp_cleanup, child_lp);

          if (linux_nat_prepare_to_resume != NULL)
            linux_nat_prepare_to_resume (child_lp);

          /* When debugging an inferior in an architecture that supports
             hardware single stepping on a kernel without commit
             6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
             process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
             set if the parent process had them set.
             To work around this, single step the child process
             once before detaching to clear the flags.  */

          if (!gdbarch_software_single_step_p (target_thread_architecture
                                               (child_lp->ptid)))
            {
              linux_disable_event_reporting (child_pid);
              if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
                perror_with_name (_("Couldn't do single step"));
              if (my_waitpid (child_pid, &status, 0) < 0)
                perror_with_name (_("Couldn't wait vfork process"));
            }

          if (WIFSTOPPED (status))
            {
              int signo;

              signo = WSTOPSIG (status);
              if (signo != 0
                  && !signal_pass_state (gdb_signal_from_host (signo)))
                signo = 0;
              ptrace (PTRACE_DETACH, child_pid, 0, signo);
            }

          /* Resets value of inferior_ptid to parent ptid.  */
          do_cleanups (old_chain);
        }
      else
        {
          /* Let the thread_db layer learn about this new process.  */
          check_for_thread_db ();
        }

      do_cleanups (old_chain);

      if (has_vforked)
        {
          struct lwp_info *parent_lp;

          parent_lp = find_lwp_pid (parent_ptid);
          gdb_assert (linux_supports_tracefork () >= 0);

          if (linux_supports_tracevforkdone ())
            {
              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "LCFF: waiting for VFORK_DONE on %d\n",
                                    parent_pid);
              parent_lp->stopped = 1;

              /* We'll handle the VFORK_DONE event like any other
                 event, in target_wait.  */
            }
          else
            {
              /* We can't insert breakpoints until the child has
                 finished with the shared memory region.  We need to
                 wait until that happens.  Ideal would be to just
                 call:
                 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
                 - waitpid (parent_pid, &status, __WALL);
                 However, most architectures can't handle a syscall
                 being traced on the way out if it wasn't traced on
                 the way in.

                 We might also think to loop, continuing the child
                 until it exits or gets a SIGTRAP.  One problem is
                 that the child might call ptrace with PTRACE_TRACEME.

                 There's no simple and reliable way to figure out when
                 the vforked child will be done with its copy of the
                 shared memory.  We could step it out of the syscall,
                 two instructions, let it go, and then single-step the
                 parent once.  When we have hardware single-step, this
                 would work; with software single-step it could still
                 be made to work but we'd have to be able to insert
                 single-step breakpoints in the child, and we'd have
                 to insert -just- the single-step breakpoint in the
                 parent.  Very awkward.

                 In the end, the best we can do is to make sure it
                 runs for a little while.  Hopefully it will be out of
                 range of any breakpoints we reinsert.  Usually this
                 is only the single-step breakpoint at vfork's return
                 point.  */

              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "LCFF: no VFORK_DONE "
                                    "support, sleeping a bit\n");

              usleep (10000);

              /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
                 and leave it pending.  The next linux_nat_resume call
                 will notice a pending event, and bypasses actually
                 resuming the inferior.  */
              parent_lp->status = 0;
              parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
              parent_lp->stopped = 1;

              /* If we're in async mode, need to tell the event loop
                 there's something here to process.  */
              if (target_is_async_p ())
                async_file_mark ();
            }
        }
    }
  else
    {
      struct lwp_info *child_lp;

      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Let the thread_db layer learn about this new process.  */
      check_for_thread_db ();
    }

  return 0;
}

\f
static int
linux_child_insert_fork_catchpoint (struct target_ops *self, int pid)
{
  return !linux_supports_tracefork ();
}

static int
linux_child_remove_fork_catchpoint (struct target_ops *self, int pid)
{
  return 0;
}

static int
linux_child_insert_vfork_catchpoint (struct target_ops *self, int pid)
{
  return !linux_supports_tracefork ();
}

static int
linux_child_remove_vfork_catchpoint (struct target_ops *self, int pid)
{
  return 0;
}

static int
linux_child_insert_exec_catchpoint (struct target_ops *self, int pid)
{
  return !linux_supports_tracefork ();
}

static int
linux_child_remove_exec_catchpoint (struct target_ops *self, int pid)
{
  return 0;
}

static int
linux_child_set_syscall_catchpoint (struct target_ops *self,
                                    int pid, int needed, int any_count,
                                    int table_size, int *table)
{
  if (!linux_supports_tracesysgood ())
    return 1;

  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `table' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}

/* List of known LWPs.  */
struct lwp_info *lwp_list;
\f

/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}

/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.  */
static void
linux_nat_pass_signals (struct target_ops *self,
                        int numsigs, unsigned char *pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = gdb_signal_from_host (signo);
      if (target_signo < numsigs && pass_signals[target_signo])
        sigaddset (&pass_mask, signo);
    }
}

\f

/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static char *linux_child_pid_to_exec_file (struct target_ops *self, int pid);
static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);

\f

/* Destroy and free LP.  */

static void
lwp_free (struct lwp_info *lp)
{
  xfree (lp->arch_private);
  xfree (lp);
}

/* Remove all LWPs belonging to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  struct lwp_info *lp, *lpprev, *lpnext;

  lpprev = NULL;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_get_pid (lp->ptid) == pid)
        {
          if (lp == lwp_list)
            lwp_list = lp->next;
          else
            lpprev->next = lp->next;

          lwp_free (lp);
        }
      else
        lpprev = lp;
    }
}

/* Add the LWP specified by PTID to the list.  PTID is the first LWP
   in the process.  Return a pointer to the structure describing the
   new LWP.

   This differs from add_lwp in that we don't let the arch specific
   bits know about this new thread.  Current clients of this callback
   take the opportunity to install watchpoints in the new thread, and
   we shouldn't do that for the first thread.  If we're spawning a
   child ("run"), the thread executes the shell wrapper first, and we
   shouldn't touch it until it execs the program we want to debug.
   For "attach", it'd be okay to call the callback, but it's not
   necessary, because watchpoints can't yet have been inserted into
   the inferior.  */

static struct lwp_info *
add_initial_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (ptid_lwp_p (ptid));

  lp = XNEW (struct lwp_info);

  memset (lp, 0, sizeof (struct lwp_info));

  lp->last_resume_kind = resume_continue;
  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;
  lp->core = -1;

  lp->next = lwp_list;
  lwp_list = lp;

  return lp;
}

/* Add the LWP specified by PID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be
   stopped.  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  lp = add_initial_lwp (ptid);

  /* Let the arch specific bits know about this new thread.  Current
     clients of this callback take the opportunity to install
     watchpoints in the new thread.  We don't do this for the first
     thread though.  See add_initial_lwp.  */
  if (linux_nat_new_thread != NULL)
    linux_nat_new_thread (lp);

  return lp;
}

/* Remove the LWP specified by PID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  struct lwp_info *lp, *lpprev;

  lpprev = NULL;

  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
    if (ptid_equal (lp->ptid, ptid))
      break;

  if (!lp)
    return;

  if (lpprev)
    lpprev->next = lp->next;
  else
    lwp_list = lp->next;

  lwp_free (lp);
}

/* Return a pointer to the structure describing the LWP corresponding
   to PID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwp;

  if (ptid_lwp_p (ptid))
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  for (lp = lwp_list; lp; lp = lp->next)
    if (lwp == ptid_get_lwp (lp->ptid))
      return lp;

  return NULL;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   iterate_over_lwps_ftype callback,
                   void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_match (lp->ptid, filter))
        {
          if ((*callback) (lp, data) != 0)
            return lp;
        }
    }

  return NULL;
}

/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  purge_lwp_list (ptid_get_pid (inferior_ptid));

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_ptid (lp->ptid);

  if (th)
    {
      if (print_thread_events)
	printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      delete_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}

/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *signalled)
{
  pid_t new_pid, pid = ptid_get_lwp (ptid);
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
         is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
         (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, __WALL);
  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
                            pid, status_to_str (status));
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LNPAW: Received %s after attaching\n",
                            status_to_str (status));
    }

  return status;
}

static void
linux_nat_create_inferior (struct target_ops *ops,
                           char *exec_file, char *allargs, char **env,
                           int from_tty)
{
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

  /* Make sure we report all signals during startup.  */
  linux_nat_pass_signals (ops, 0, NULL);

  linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);

  do_cleanups (restore_personality);
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  struct lwp_info *lp;

  /* Ignore LWPs we're already attached to.  */
  lp = find_lwp_pid (ptid);
  if (lp == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
        {
          int err = errno;

          /* Be quiet if we simply raced with the thread exiting.
             EPERM is returned if the thread's task still exists, and
             is marked as exited or zombie, as well as other
             conditions, so in that case, confirm the status in
             /proc/PID/status.  */
          if (err == ESRCH
              || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
            {
              if (debug_linux_nat)
                {
                  fprintf_unfiltered (gdb_stdlog,
                                      "Cannot attach to lwp %d: "
                                      "thread is gone (%d: %s)\n",
                                      lwpid, err, safe_strerror (err));
                }
            }
          else
            {
              warning (_("Cannot attach to lwp %d: %s"),
                       lwpid,
                       linux_ptrace_attach_fail_reason_string (ptid,
                                                               err));
            }
        }
      else
        {
          if (debug_linux_nat)
            fprintf_unfiltered (gdb_stdlog,
                                "PTRACE_ATTACH %s, 0, 0 (OK)\n",
                                target_pid_to_str (ptid));

          lp = add_lwp (ptid);

          /* The next time we wait for this LWP we'll see a SIGSTOP as
             PTRACE_ATTACH brings it to a halt.  */
          lp->signalled = 1;

          /* We need to wait for a stop before being able to make the
             next ptrace call on this LWP.  */
          lp->must_set_ptrace_flags = 1;
        }

      return 1;
    }
  return 0;
}

d6b0e80f 1097static void
c0939df1 1098linux_nat_attach (struct target_ops *ops, const char *args, int from_tty)
d6b0e80f
AC
1099{
1100 struct lwp_info *lp;
d6b0e80f 1101 int status;
af990527 1102 ptid_t ptid;
d6b0e80f 1103
2455069d 1104 /* Make sure we report all signals during attach. */
94bedb42 1105 linux_nat_pass_signals (ops, 0, NULL);
2455069d 1106
492d29ea 1107 TRY
87b0bb13
JK
1108 {
1109 linux_ops->to_attach (ops, args, from_tty);
1110 }
492d29ea 1111 CATCH (ex, RETURN_MASK_ERROR)
87b0bb13
JK
1112 {
1113 pid_t pid = parse_pid_to_attach (args);
1114 struct buffer buffer;
1115 char *message, *buffer_s;
1116
1117 message = xstrdup (ex.message);
1118 make_cleanup (xfree, message);
1119
1120 buffer_init (&buffer);
7ae1a6a6 1121 linux_ptrace_attach_fail_reason (pid, &buffer);
87b0bb13
JK
1122
1123 buffer_grow_str0 (&buffer, "");
1124 buffer_s = buffer_finish (&buffer);
1125 make_cleanup (xfree, buffer_s);
1126
7ae1a6a6
PA
1127 if (*buffer_s != '\0')
1128 throw_error (ex.error, "warning: %s\n%s", buffer_s, message);
1129 else
1130 throw_error (ex.error, "%s", message);
87b0bb13 1131 }
492d29ea 1132 END_CATCH
d6b0e80f 1133
af990527
PA
1134 /* The ptrace base target adds the main thread with (pid,0,0)
1135 format. Decorate it with lwp info. */
dfd4cc63
LM
1136 ptid = ptid_build (ptid_get_pid (inferior_ptid),
1137 ptid_get_pid (inferior_ptid),
1138 0);
af990527
PA
1139 thread_change_ptid (inferior_ptid, ptid);
1140
9f0bdab8 1141 /* Add the initial process as the first LWP to the list. */
26cb8b7c 1142 lp = add_initial_lwp (ptid);
a0ef4274 1143
4a6ed09b 1144 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->signalled);
dacc9cb2
PP
1145 if (!WIFSTOPPED (status))
1146 {
1147 if (WIFEXITED (status))
1148 {
1149 int exit_code = WEXITSTATUS (status);
1150
1151 target_terminal_ours ();
1152 target_mourn_inferior ();
1153 if (exit_code == 0)
1154 error (_("Unable to attach: program exited normally."));
1155 else
1156 error (_("Unable to attach: program exited with code %d."),
1157 exit_code);
1158 }
1159 else if (WIFSIGNALED (status))
1160 {
2ea28649 1161 enum gdb_signal signo;
dacc9cb2
PP
1162
1163 target_terminal_ours ();
1164 target_mourn_inferior ();
1165
2ea28649 1166 signo = gdb_signal_from_host (WTERMSIG (status));
dacc9cb2
PP
1167 error (_("Unable to attach: program terminated with signal "
1168 "%s, %s."),
2ea28649
PA
1169 gdb_signal_to_name (signo),
1170 gdb_signal_to_string (signo));
dacc9cb2
PP
1171 }
1172
1173 internal_error (__FILE__, __LINE__,
1174 _("unexpected status %d for PID %ld"),
dfd4cc63 1175 status, (long) ptid_get_lwp (ptid));
dacc9cb2
PP
1176 }
1177
a0ef4274 1178 lp->stopped = 1;
9f0bdab8 1179
a0ef4274 1180 /* Save the wait status to report later. */
d6b0e80f 1181 lp->resumed = 1;
a0ef4274
DJ
1182 if (debug_linux_nat)
1183 fprintf_unfiltered (gdb_stdlog,
1184 "LNA: waitpid %ld, saving status %s\n",
dfd4cc63 1185 (long) ptid_get_pid (lp->ptid), status_to_str (status));
710151dd 1186
7feb7d06
PA
1187 lp->status = status;
1188
8784d563
PA
1189 /* We must attach to every LWP. If /proc is mounted, use that to
1190 find them now. The inferior may be using raw clone instead of
1191 using pthreads. But even if it is using pthreads, thread_db
1192 walks structures in the inferior's address space to find the list
1193 of threads/LWPs, and those structures may well be corrupted.
1194 Note that once thread_db is loaded, we'll still use it to list
1195 threads and associate pthread info with each LWP. */
1196 linux_proc_attach_tgid_threads (ptid_get_pid (lp->ptid),
1197 attach_proc_task_lwp_callback);
1198
7feb7d06 1199 if (target_can_async_p ())
6a3753b3 1200 target_async (1);
d6b0e80f
AC
1201}
1202
a0ef4274
DJ
1203/* Get pending status of LP. */
1204static int
1205get_pending_status (struct lwp_info *lp, int *status)
1206{
a493e3e2 1207 enum gdb_signal signo = GDB_SIGNAL_0;
ca2163eb
PA
1208
1209 /* If we paused threads momentarily, we may have stored pending
1210 events in lp->status or lp->waitstatus (see stop_wait_callback),
1211 and GDB core hasn't seen any signal for those threads.
1212 Otherwise, the last signal reported to the core is found in the
1213 thread object's stop_signal.
1214
1215 There's a corner case that isn't handled here at present. Only
1216 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1217 stop_signal make sense as a real signal to pass to the inferior.
1218 Some catchpoint related events, like
1219 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
a493e3e2 1220 to GDB_SIGNAL_SIGTRAP when the catchpoint triggers. But,
ca2163eb
PA
1221 those traps are debug API (ptrace in our case) related and
1222 induced; the inferior wouldn't see them if it wasn't being
1223 traced. Hence, we should never pass them to the inferior, even
1224 when set to pass state. Since this corner case isn't handled by
1225 infrun.c when proceeding with a signal, for consistency, neither
1226 do we handle it here (or elsewhere in the file we check for
1227 signal pass state). Normally SIGTRAP isn't set to pass state, so
1228 this is really a corner case. */
1229
1230 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
a493e3e2 1231 signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
ca2163eb 1232 else if (lp->status)
2ea28649 1233 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
fbea99ea 1234 else if (target_is_non_stop_p () && !is_executing (lp->ptid))
ca2163eb
PA
1235 {
1236 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1237
16c381f0 1238 signo = tp->suspend.stop_signal;
ca2163eb 1239 }
fbea99ea 1240 else if (!target_is_non_stop_p ())
a0ef4274 1241 {
ca2163eb
PA
1242 struct target_waitstatus last;
1243 ptid_t last_ptid;
4c28f408 1244
ca2163eb 1245 get_last_target_status (&last_ptid, &last);
4c28f408 1246
dfd4cc63 1247 if (ptid_get_lwp (lp->ptid) == ptid_get_lwp (last_ptid))
ca2163eb 1248 {
e09875d4 1249 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1250
16c381f0 1251 signo = tp->suspend.stop_signal;
4c28f408 1252 }
ca2163eb 1253 }
4c28f408 1254
ca2163eb 1255 *status = 0;
4c28f408 1256
a493e3e2 1257 if (signo == GDB_SIGNAL_0)
ca2163eb
PA
1258 {
1259 if (debug_linux_nat)
1260 fprintf_unfiltered (gdb_stdlog,
1261 "GPT: lwp %s has no pending signal\n",
1262 target_pid_to_str (lp->ptid));
1263 }
1264 else if (!signal_pass_state (signo))
1265 {
1266 if (debug_linux_nat)
3e43a32a
MS
1267 fprintf_unfiltered (gdb_stdlog,
1268 "GPT: lwp %s had signal %s, "
1269 "but it is in no pass state\n",
ca2163eb 1270 target_pid_to_str (lp->ptid),
2ea28649 1271 gdb_signal_to_string (signo));
a0ef4274 1272 }
a0ef4274 1273 else
4c28f408 1274 {
2ea28649 1275 *status = W_STOPCODE (gdb_signal_to_host (signo));
ca2163eb
PA
1276
1277 if (debug_linux_nat)
1278 fprintf_unfiltered (gdb_stdlog,
1279 "GPT: lwp %s has pending signal %s\n",
1280 target_pid_to_str (lp->ptid),
2ea28649 1281 gdb_signal_to_string (signo));
4c28f408 1282 }
a0ef4274
DJ
1283
1284 return 0;
1285}
1286
d6b0e80f
AC
1287static int
1288detach_callback (struct lwp_info *lp, void *data)
1289{
1290 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1291
1292 if (debug_linux_nat && lp->status)
1293 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1294 strsignal (WSTOPSIG (lp->status)),
1295 target_pid_to_str (lp->ptid));
1296
a0ef4274
DJ
1297 /* If there is a pending SIGSTOP, get rid of it. */
1298 if (lp->signalled)
d6b0e80f 1299 {
d6b0e80f
AC
1300 if (debug_linux_nat)
1301 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1302 "DC: Sending SIGCONT to %s\n",
1303 target_pid_to_str (lp->ptid));
d6b0e80f 1304
dfd4cc63 1305 kill_lwp (ptid_get_lwp (lp->ptid), SIGCONT);
d6b0e80f 1306 lp->signalled = 0;
d6b0e80f
AC
1307 }
1308
1309 /* We don't actually detach from the LWP that has an id equal to the
1310 overall process id just yet. */
dfd4cc63 1311 if (ptid_get_lwp (lp->ptid) != ptid_get_pid (lp->ptid))
d6b0e80f 1312 {
a0ef4274
DJ
1313 int status = 0;
1314
1315 /* Pass on any pending signal for this LWP. */
1316 get_pending_status (lp, &status);
1317
7b50312a
PA
1318 if (linux_nat_prepare_to_resume != NULL)
1319 linux_nat_prepare_to_resume (lp);
d6b0e80f 1320 errno = 0;
dfd4cc63 1321 if (ptrace (PTRACE_DETACH, ptid_get_lwp (lp->ptid), 0,
a0ef4274 1322 WSTOPSIG (status)) < 0)
8a3fe4f8 1323 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1324 safe_strerror (errno));
1325
1326 if (debug_linux_nat)
1327 fprintf_unfiltered (gdb_stdlog,
1328 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1329 target_pid_to_str (lp->ptid),
7feb7d06 1330 strsignal (WSTOPSIG (status)));
d6b0e80f
AC
1331
1332 delete_lwp (lp->ptid);
1333 }
1334
1335 return 0;
1336}
1337
1338static void
52554a0e 1339linux_nat_detach (struct target_ops *ops, const char *args, int from_tty)
d6b0e80f 1340{
b84876c2 1341 int pid;
a0ef4274 1342 int status;
d90e17a7
PA
1343 struct lwp_info *main_lwp;
1344
dfd4cc63 1345 pid = ptid_get_pid (inferior_ptid);
a0ef4274 1346
ae5e0686
MK
1347 /* Don't unregister from the event loop, as there may be other
1348 inferiors running. */
b84876c2 1349
4c28f408
PA
1350 /* Stop all threads before detaching. ptrace requires that the
1351 thread is stopped to sucessfully detach. */
d90e17a7 1352 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
4c28f408
PA
1353 /* ... and wait until all of them have reported back that
1354 they're no longer running. */
d90e17a7 1355 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
4c28f408 1356
d90e17a7 1357 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
d6b0e80f
AC
1358
1359 /* Only the initial process should be left right now. */
dfd4cc63 1360 gdb_assert (num_lwps (ptid_get_pid (inferior_ptid)) == 1);
d90e17a7
PA
1361
1362 main_lwp = find_lwp_pid (pid_to_ptid (pid));
d6b0e80f 1363
a0ef4274
DJ
1364 /* Pass on any pending signal for the last LWP. */
1365 if ((args == NULL || *args == '\0')
d90e17a7 1366 && get_pending_status (main_lwp, &status) != -1
a0ef4274
DJ
1367 && WIFSTOPPED (status))
1368 {
52554a0e
TT
1369 char *tem;
1370
a0ef4274
DJ
1371 /* Put the signal number in ARGS so that inf_ptrace_detach will
1372 pass it along with PTRACE_DETACH. */
224c3ddb 1373 tem = (char *) alloca (8);
cde33bf1 1374 xsnprintf (tem, 8, "%d", (int) WSTOPSIG (status));
52554a0e 1375 args = tem;
ddabfc73
TT
1376 if (debug_linux_nat)
1377 fprintf_unfiltered (gdb_stdlog,
1378 "LND: Sending signal %s to %s\n",
1379 args,
1380 target_pid_to_str (main_lwp->ptid));
a0ef4274
DJ
1381 }
1382
7b50312a
PA
1383 if (linux_nat_prepare_to_resume != NULL)
1384 linux_nat_prepare_to_resume (main_lwp);
d90e17a7 1385 delete_lwp (main_lwp->ptid);
b84876c2 1386
7a7d3353
PA
1387 if (forks_exist_p ())
1388 {
1389 /* Multi-fork case. The current inferior_ptid is being detached
1390 from, but there are other viable forks to debug. Detach from
1391 the current fork, and context-switch to the first
1392 available. */
1393 linux_fork_detach (args, from_tty);
7a7d3353
PA
1394 }
1395 else
1396 linux_ops->to_detach (ops, args, from_tty);
d6b0e80f
AC
1397}
1398
8a99810d
PA
1399/* Resume execution of the inferior process. If STEP is nonzero,
1400 single-step it. If SIGNAL is nonzero, give it that signal. */
1401
1402static void
23f238d3
PA
1403linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
1404 enum gdb_signal signo)
8a99810d 1405{
8a99810d 1406 lp->step = step;
9c02b525
PA
1407
1408 /* stop_pc doubles as the PC the LWP had when it was last resumed.
1409 We only presently need that if the LWP is stepped though (to
1410 handle the case of stepping a breakpoint instruction). */
1411 if (step)
1412 {
1413 struct regcache *regcache = get_thread_regcache (lp->ptid);
1414
1415 lp->stop_pc = regcache_read_pc (regcache);
1416 }
1417 else
1418 lp->stop_pc = 0;
1419
8a99810d
PA
1420 if (linux_nat_prepare_to_resume != NULL)
1421 linux_nat_prepare_to_resume (lp);
90ad5e1d 1422 linux_ops->to_resume (linux_ops, lp->ptid, step, signo);
23f238d3
PA
1423
1424 /* Successfully resumed. Clear state that no longer makes sense,
1425 and mark the LWP as running. Must not do this before resuming
1426 otherwise if that fails other code will be confused. E.g., we'd
1427 later try to stop the LWP and hang forever waiting for a stop
1428 status. Note that we must not throw after this is cleared,
1429 otherwise handle_zombie_lwp_error would get confused. */
8a99810d 1430 lp->stopped = 0;
23f238d3 1431 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
8a99810d
PA
1432 registers_changed_ptid (lp->ptid);
1433}
1434
23f238d3
PA
1435/* Called when we try to resume a stopped LWP and that errors out. If
1436 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
1437 or about to become), discard the error, clear any pending status
1438 the LWP may have, and return true (we'll collect the exit status
1439 soon enough). Otherwise, return false. */
1440
1441static int
1442check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
1443{
1444 /* If we get an error after resuming the LWP successfully, we'd
1445 confuse !T state for the LWP being gone. */
1446 gdb_assert (lp->stopped);
1447
1448 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
1449 because even if ptrace failed with ESRCH, the tracee may be "not
1450 yet fully dead", but already refusing ptrace requests. In that
1451 case the tracee has 'R (Running)' state for a little bit
1452 (observed in Linux 3.18). See also the note on ESRCH in the
1453 ptrace(2) man page. Instead, check whether the LWP has any state
1454 other than ptrace-stopped. */
1455
1456 /* Don't assume anything if /proc/PID/status can't be read. */
1457 if (linux_proc_pid_is_trace_stopped_nowarn (ptid_get_lwp (lp->ptid)) == 0)
1458 {
1459 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
1460 lp->status = 0;
1461 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1462 return 1;
1463 }
1464 return 0;
1465}
1466
1467/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
1468 disappears while we try to resume it. */
1469
1470static void
1471linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1472{
1473 TRY
1474 {
1475 linux_resume_one_lwp_throw (lp, step, signo);
1476 }
1477 CATCH (ex, RETURN_MASK_ERROR)
1478 {
1479 if (!check_ptrace_stopped_lwp_gone (lp))
1480 throw_exception (ex);
1481 }
1482 END_CATCH
1483}
1484
d6b0e80f
AC
1485/* Resume LP. */
1486
25289eb2 1487static void
e5ef252a 1488resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
d6b0e80f 1489{
25289eb2 1490 if (lp->stopped)
6c95b8df 1491 {
c9657e70 1492 struct inferior *inf = find_inferior_ptid (lp->ptid);
25289eb2
PA
1493
1494 if (inf->vfork_child != NULL)
1495 {
1496 if (debug_linux_nat)
1497 fprintf_unfiltered (gdb_stdlog,
1498 "RC: Not resuming %s (vfork parent)\n",
1499 target_pid_to_str (lp->ptid));
1500 }
8a99810d 1501 else if (!lwp_status_pending_p (lp))
25289eb2
PA
1502 {
1503 if (debug_linux_nat)
1504 fprintf_unfiltered (gdb_stdlog,
e5ef252a
PA
1505 "RC: Resuming sibling %s, %s, %s\n",
1506 target_pid_to_str (lp->ptid),
1507 (signo != GDB_SIGNAL_0
1508 ? strsignal (gdb_signal_to_host (signo))
1509 : "0"),
1510 step ? "step" : "resume");
25289eb2 1511
8a99810d 1512 linux_resume_one_lwp (lp, step, signo);
25289eb2
PA
1513 }
1514 else
1515 {
1516 if (debug_linux_nat)
1517 fprintf_unfiltered (gdb_stdlog,
1518 "RC: Not resuming sibling %s (has pending)\n",
1519 target_pid_to_str (lp->ptid));
1520 }
6c95b8df 1521 }
25289eb2 1522 else
d6b0e80f 1523 {
d90e17a7
PA
1524 if (debug_linux_nat)
1525 fprintf_unfiltered (gdb_stdlog,
25289eb2 1526 "RC: Not resuming sibling %s (not stopped)\n",
d6b0e80f 1527 target_pid_to_str (lp->ptid));
d6b0e80f 1528 }
25289eb2 1529}
d6b0e80f 1530
8817a6f2
PA
1531/* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1532 Resume LWP with the last stop signal, if it is in pass state. */
e5ef252a 1533
25289eb2 1534static int
8817a6f2 1535linux_nat_resume_callback (struct lwp_info *lp, void *except)
25289eb2 1536{
e5ef252a
PA
1537 enum gdb_signal signo = GDB_SIGNAL_0;
1538
8817a6f2
PA
1539 if (lp == except)
1540 return 0;
1541
e5ef252a
PA
1542 if (lp->stopped)
1543 {
1544 struct thread_info *thread;
1545
1546 thread = find_thread_ptid (lp->ptid);
1547 if (thread != NULL)
1548 {
70509625 1549 signo = thread->suspend.stop_signal;
e5ef252a
PA
1550 thread->suspend.stop_signal = GDB_SIGNAL_0;
1551 }
1552 }
1553
1554 resume_lwp (lp, 0, signo);
d6b0e80f
AC
1555 return 0;
1556}
1557
1558static int
1559resume_clear_callback (struct lwp_info *lp, void *data)
1560{
1561 lp->resumed = 0;
25289eb2 1562 lp->last_resume_kind = resume_stop;
d6b0e80f
AC
1563 return 0;
1564}
1565
1566static int
1567resume_set_callback (struct lwp_info *lp, void *data)
1568{
1569 lp->resumed = 1;
25289eb2 1570 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1571 return 0;
1572}
1573
1574static void
28439f5e 1575linux_nat_resume (struct target_ops *ops,
2ea28649 1576 ptid_t ptid, int step, enum gdb_signal signo)
d6b0e80f
AC
1577{
1578 struct lwp_info *lp;
d90e17a7 1579 int resume_many;
d6b0e80f 1580
76f50ad1
DJ
1581 if (debug_linux_nat)
1582 fprintf_unfiltered (gdb_stdlog,
1583 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1584 step ? "step" : "resume",
1585 target_pid_to_str (ptid),
a493e3e2 1586 (signo != GDB_SIGNAL_0
2ea28649 1587 ? strsignal (gdb_signal_to_host (signo)) : "0"),
76f50ad1
DJ
1588 target_pid_to_str (inferior_ptid));
1589
d6b0e80f 1590 /* A specific PTID means `step only this process id'. */
d90e17a7
PA
1591 resume_many = (ptid_equal (minus_one_ptid, ptid)
1592 || ptid_is_pid (ptid));
4c28f408 1593
e3e9f5a2
PA
1594 /* Mark the lwps we're resuming as resumed. */
1595 iterate_over_lwps (ptid, resume_set_callback, NULL);
d6b0e80f 1596
d90e17a7
PA
1597 /* See if it's the current inferior that should be handled
1598 specially. */
1599 if (resume_many)
1600 lp = find_lwp_pid (inferior_ptid);
1601 else
1602 lp = find_lwp_pid (ptid);
9f0bdab8 1603 gdb_assert (lp != NULL);
d6b0e80f 1604
9f0bdab8 1605 /* Remember if we're stepping. */
25289eb2 1606 lp->last_resume_kind = step ? resume_step : resume_continue;
d6b0e80f 1607
9f0bdab8
DJ
1608 /* If we have a pending wait status for this thread, there is no
1609 point in resuming the process. But first make sure that
1610 linux_nat_wait won't preemptively handle the event - we
1611 should never take this short-circuit if we are going to
1612 leave LP running, since we have skipped resuming all the
1613 other threads. This bit of code needs to be synchronized
1614 with linux_nat_wait. */
76f50ad1 1615
9f0bdab8
DJ
1616 if (lp->status && WIFSTOPPED (lp->status))
1617 {
2455069d
UW
1618 if (!lp->step
1619 && WSTOPSIG (lp->status)
1620 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
d6b0e80f 1621 {
9f0bdab8
DJ
1622 if (debug_linux_nat)
1623 fprintf_unfiltered (gdb_stdlog,
1624 "LLR: Not short circuiting for ignored "
1625 "status 0x%x\n", lp->status);
1626
d6b0e80f
AC
1627 /* FIXME: What should we do if we are supposed to continue
1628 this thread with a signal? */
a493e3e2 1629 gdb_assert (signo == GDB_SIGNAL_0);
2ea28649 1630 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
1631 lp->status = 0;
1632 }
1633 }
76f50ad1 1634
8a99810d 1635 if (lwp_status_pending_p (lp))
9f0bdab8
DJ
1636 {
1637 /* FIXME: What should we do if we are supposed to continue
1638 this thread with a signal? */
a493e3e2 1639 gdb_assert (signo == GDB_SIGNAL_0);
76f50ad1 1640
9f0bdab8
DJ
1641 if (debug_linux_nat)
1642 fprintf_unfiltered (gdb_stdlog,
1643 "LLR: Short circuiting for status 0x%x\n",
1644 lp->status);
d6b0e80f 1645
7feb7d06
PA
1646 if (target_can_async_p ())
1647 {
6a3753b3 1648 target_async (1);
7feb7d06
PA
1649 /* Tell the event loop we have something to process. */
1650 async_file_mark ();
1651 }
9f0bdab8 1652 return;
d6b0e80f
AC
1653 }
1654
d90e17a7 1655 if (resume_many)
8817a6f2 1656 iterate_over_lwps (ptid, linux_nat_resume_callback, lp);
d90e17a7 1657
d6b0e80f
AC
1658 if (debug_linux_nat)
1659 fprintf_unfiltered (gdb_stdlog,
1660 "LLR: %s %s, %s (resume event thread)\n",
1661 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2bf6fb9d 1662 target_pid_to_str (lp->ptid),
a493e3e2 1663 (signo != GDB_SIGNAL_0
2ea28649 1664 ? strsignal (gdb_signal_to_host (signo)) : "0"));
b84876c2 1665
2bf6fb9d
PA
1666 linux_resume_one_lwp (lp, step, signo);
1667
b84876c2 1668 if (target_can_async_p ())
6a3753b3 1669 target_async (1);
d6b0e80f
AC
1670}
1671
c5f62d5f 1672/* Send a signal to an LWP. */
d6b0e80f
AC
1673
1674static int
1675kill_lwp (int lwpid, int signo)
1676{
4a6ed09b 1677 int ret;
d6b0e80f 1678
4a6ed09b
PA
1679 errno = 0;
1680 ret = syscall (__NR_tkill, lwpid, signo);
1681 if (errno == ENOSYS)
1682 {
1683 /* If tkill fails, then we are not using nptl threads, a
1684 configuration we no longer support. */
1685 perror_with_name (("tkill"));
1686 }
1687 return ret;
d6b0e80f
AC
1688}
1689
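/* Illustrative sketch (added for exposition, not part of GDB): unlike
   kill(2), which targets a whole thread group, the tkill syscall used
   by kill_lwp above delivers a signal to one specific LWP.  The
   helper name is hypothetical.  */

#include <errno.h>
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int
example_stop_one_lwp (pid_t lwpid)
{
  errno = 0;
  if (syscall (__NR_tkill, lwpid, SIGSTOP) != 0)
    return -errno;		/* E.g. -ESRCH if the LWP is gone.  */
  return 0;
}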
ca2163eb
PA
1690/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1691 event, check if the core is interested in it: if not, ignore the
1692 event, and keep waiting; otherwise, we need to toggle the LWP's
1693 syscall entry/exit status, since the ptrace event itself doesn't
1694 indicate it, and report the trap to higher layers. */
1695
1696static int
1697linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1698{
1699 struct target_waitstatus *ourstatus = &lp->waitstatus;
1700 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
1701 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
1702
1703 if (stopping)
1704 {
1705 /* If we're stopping threads, there's a SIGSTOP pending, which
1706 makes it so that the LWP reports an immediate syscall return,
1707 followed by the SIGSTOP. Skip seeing that "return" using
1708 PTRACE_CONT directly, and let stop_wait_callback collect the
1709 SIGSTOP. Later when the thread is resumed, a new syscall
1710 entry event is generated. If we didn't do this (and returned 0), we'd
1711 leave a syscall entry pending, and our caller, by using
1712 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1713 itself. Later, when the user re-resumes this LWP, we'd see
1714 another syscall entry event and we'd mistake it for a return.
1715
1716 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1717 (leaving immediately with LWP->signalled set, without issuing
1718 a PTRACE_CONT), it would still be problematic to leave this
1719 syscall enter pending, as later when the thread is resumed,
1720 it would then see the same syscall exit mentioned above,
1721 followed by the delayed SIGSTOP, while the syscall didn't
1722 actually get to execute. It seems it would be even more
1723 confusing to the user. */
1724
1725 if (debug_linux_nat)
1726 fprintf_unfiltered (gdb_stdlog,
1727 "LHST: ignoring syscall %d "
1728 "for LWP %ld (stopping threads), "
1729 "resuming with PTRACE_CONT for SIGSTOP\n",
1730 syscall_number,
dfd4cc63 1731 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1732
1733 lp->syscall_state = TARGET_WAITKIND_IGNORE;
dfd4cc63 1734 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
8817a6f2 1735 lp->stopped = 0;
ca2163eb
PA
1736 return 1;
1737 }
1738
bfd09d20
JS
1739 /* Always update the entry/return state, even if this particular
1740 syscall isn't interesting to the core now. In async mode,
1741 the user could install a new catchpoint for this syscall
1742 between syscall enter/return, and we'll need to know to
1743 report a syscall return if that happens. */
1744 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1745 ? TARGET_WAITKIND_SYSCALL_RETURN
1746 : TARGET_WAITKIND_SYSCALL_ENTRY);
1747
ca2163eb
PA
1748 if (catch_syscall_enabled ())
1749 {
ca2163eb
PA
1750 if (catching_syscall_number (syscall_number))
1751 {
1752 /* Alright, an event to report. */
1753 ourstatus->kind = lp->syscall_state;
1754 ourstatus->value.syscall_number = syscall_number;
1755
1756 if (debug_linux_nat)
1757 fprintf_unfiltered (gdb_stdlog,
1758 "LHST: stopping for %s of syscall %d"
1759 " for LWP %ld\n",
3e43a32a
MS
1760 lp->syscall_state
1761 == TARGET_WAITKIND_SYSCALL_ENTRY
ca2163eb
PA
1762 ? "entry" : "return",
1763 syscall_number,
dfd4cc63 1764 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1765 return 0;
1766 }
1767
1768 if (debug_linux_nat)
1769 fprintf_unfiltered (gdb_stdlog,
1770 "LHST: ignoring %s of syscall %d "
1771 "for LWP %ld\n",
1772 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1773 ? "entry" : "return",
1774 syscall_number,
dfd4cc63 1775 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1776 }
1777 else
1778 {
1779 /* If we had been syscall tracing, and hence used PT_SYSCALL
1780 before on this LWP, it could happen that the user removes all
1781 syscall catchpoints before we get to process this event.
1782 There are two noteworthy issues here:
1783
1784 - When stopped at a syscall entry event, resuming with
1785 PT_STEP still resumes executing the syscall and reports a
1786 syscall return.
1787
1788 - Only PT_SYSCALL catches syscall enters. If we last
1789 single-stepped this thread, then this event can't be a
1790 syscall enter; having just single-stepped, this
1791 has to be a syscall exit.
1792
1793 The points above mean that the next resume, be it PT_STEP or
1794 PT_CONTINUE, can not trigger a syscall trace event. */
1795 if (debug_linux_nat)
1796 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
1797 "LHST: caught syscall event "
1798 "with no syscall catchpoints."
ca2163eb
PA
1799 " %d for LWP %ld, ignoring\n",
1800 syscall_number,
dfd4cc63 1801 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1802 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1803 }
1804
1805 /* The core isn't interested in this event. For efficiency, avoid
1806 stopping all threads only to have the core resume them all again.
1807 Since we're not stopping threads, if we're still syscall tracing
1808 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1809 subsequent syscall. Simply resume using the inf-ptrace layer,
1810 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1811
8a99810d 1812 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
ca2163eb
PA
1813 return 1;
1814}
1815
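/* Illustrative sketch (added for exposition, not part of GDB): when
   the PTRACE_O_TRACESYSGOOD option is set, syscall stops report a
   stop signal of (SIGTRAP | 0x80), which is what the SYSCALL_SIGTRAP
   checks in this file correspond to; that keeps them distinguishable
   from real SIGTRAPs raised by breakpoints.  */

#include <signal.h>
#include <sys/wait.h>

static int
example_is_syscall_stop (int status)
{
  return WIFSTOPPED (status) && WSTOPSIG (status) == (SIGTRAP | 0x80);
}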
3d799a95
DJ
1816/* Handle a GNU/Linux extended wait response. If we see a clone
1817 event, we need to add the new LWP to our list (and not report the
1818 trap to higher layers). This function returns non-zero if the
1819 event should be ignored and we should wait again. If STOPPING is
1820 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1821
1822static int
4dd63d48 1823linux_handle_extended_wait (struct lwp_info *lp, int status)
d6b0e80f 1824{
dfd4cc63 1825 int pid = ptid_get_lwp (lp->ptid);
3d799a95 1826 struct target_waitstatus *ourstatus = &lp->waitstatus;
89a5711c 1827 int event = linux_ptrace_get_extended_event (status);
d6b0e80f 1828
bfd09d20
JS
1829 /* All extended events we currently use are mid-syscall. Only
1830 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
1831 you have to be using PTRACE_SEIZE to get that. */
1832 lp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
1833
3d799a95
DJ
1834 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1835 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1836 {
3d799a95
DJ
1837 unsigned long new_pid;
1838 int ret;
1839
1840 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1841
3d799a95
DJ
1842 /* If we haven't already seen the new PID stop, wait for it now. */
1843 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1844 {
1845 /* The new child has a pending SIGSTOP. We can't affect it until it
1846 hits the SIGSTOP, but we're already attached. */
4a6ed09b 1847 ret = my_waitpid (new_pid, &status, __WALL);
3d799a95
DJ
1848 if (ret == -1)
1849 perror_with_name (_("waiting for new child"));
1850 else if (ret != new_pid)
1851 internal_error (__FILE__, __LINE__,
1852 _("wait returned unexpected PID %d"), ret);
1853 else if (!WIFSTOPPED (status))
1854 internal_error (__FILE__, __LINE__,
1855 _("wait returned unexpected status 0x%x"), status);
1856 }
1857
3a3e9ee3 1858 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95 1859
26cb8b7c
PA
1860 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1861 {
1862 /* The arch-specific native code may need to know about new
1863 forks even if those end up never mapped to an
1864 inferior. */
1865 if (linux_nat_new_fork != NULL)
1866 linux_nat_new_fork (lp, new_pid);
1867 }
1868
2277426b 1869 if (event == PTRACE_EVENT_FORK
dfd4cc63 1870 && linux_fork_checkpointing_p (ptid_get_pid (lp->ptid)))
2277426b 1871 {
2277426b
PA
1872 /* Handle checkpointing by linux-fork.c here as a special
1873 case. We don't want the follow-fork-mode or 'catch fork'
1874 to interfere with this. */
1875
1876 /* This won't actually modify the breakpoint list, but will
1877 physically remove the breakpoints from the child. */
d80ee84f 1878 detach_breakpoints (ptid_build (new_pid, new_pid, 0));
2277426b
PA
1879
1880 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
1881 if (!find_fork_pid (new_pid))
1882 add_fork (new_pid);
2277426b
PA
1883
1884 /* Report as spurious, so that infrun doesn't want to follow
1885 this fork. We're actually doing an infcall in
1886 linux-fork.c. */
1887 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2277426b
PA
1888
1889 /* Report the stop to the core. */
1890 return 0;
1891 }
1892
3d799a95
DJ
1893 if (event == PTRACE_EVENT_FORK)
1894 ourstatus->kind = TARGET_WAITKIND_FORKED;
1895 else if (event == PTRACE_EVENT_VFORK)
1896 ourstatus->kind = TARGET_WAITKIND_VFORKED;
4dd63d48 1897 else if (event == PTRACE_EVENT_CLONE)
3d799a95 1898 {
78768c4a
JK
1899 struct lwp_info *new_lp;
1900
3d799a95 1901 ourstatus->kind = TARGET_WAITKIND_IGNORE;
78768c4a 1902
3c4d7e12
PA
1903 if (debug_linux_nat)
1904 fprintf_unfiltered (gdb_stdlog,
1905 "LHEW: Got clone event "
1906 "from LWP %d, new child is LWP %ld\n",
1907 pid, new_pid);
1908
dfd4cc63 1909 new_lp = add_lwp (ptid_build (ptid_get_pid (lp->ptid), new_pid, 0));
4c28f408 1910 new_lp->stopped = 1;
4dd63d48 1911 new_lp->resumed = 1;
d6b0e80f 1912
2db9a427
PA
1913 /* If the thread_db layer is active, let it record the user
1914 level thread id and status, and add the thread to GDB's
1915 list. */
1916 if (!thread_db_notice_clone (lp->ptid, new_lp->ptid))
3d799a95 1917 {
2db9a427
PA
1918 /* The process is not using thread_db. Add the LWP to
1919 GDB's list. */
1920 target_post_attach (ptid_get_lwp (new_lp->ptid));
1921 add_thread (new_lp->ptid);
1922 }
4c28f408 1923
2ee52aa4 1924 /* Even if we're stopping the thread for some reason
4dd63d48
PA
1925 internal to this module, from the perspective of infrun
1926 and the user/frontend, this new thread is running until
1927 it next reports a stop. */
2ee52aa4 1928 set_running (new_lp->ptid, 1);
4dd63d48 1929 set_executing (new_lp->ptid, 1);
4c28f408 1930
4dd63d48 1931 if (WSTOPSIG (status) != SIGSTOP)
79395f92 1932 {
4dd63d48
PA
1933 /* This can happen if someone starts sending signals with
1934 a lower number than SIGSTOP (e.g. SIGUSR1) to the new
1935 thread before it gets a chance to run.
1936 This is an unlikely case, and harder to handle for
1937 fork / vfork than for clone, so we do not try - but
1938 we handle it for clone events here. */
1939
1940 new_lp->signalled = 1;
1941
79395f92
PA
1942 /* We created NEW_LP so it cannot yet contain STATUS. */
1943 gdb_assert (new_lp->status == 0);
1944
1945 /* Save the wait status to report later. */
1946 if (debug_linux_nat)
1947 fprintf_unfiltered (gdb_stdlog,
1948 "LHEW: waitpid of new LWP %ld, "
1949 "saving status %s\n",
dfd4cc63 1950 (long) ptid_get_lwp (new_lp->ptid),
79395f92
PA
1951 status_to_str (status));
1952 new_lp->status = status;
1953 }
1954
3d799a95
DJ
1955 return 1;
1956 }
1957
1958 return 0;
d6b0e80f
AC
1959 }
1960
3d799a95
DJ
1961 if (event == PTRACE_EVENT_EXEC)
1962 {
a75724bc
PA
1963 if (debug_linux_nat)
1964 fprintf_unfiltered (gdb_stdlog,
1965 "LHEW: Got exec event from LWP %ld\n",
dfd4cc63 1966 ptid_get_lwp (lp->ptid));
a75724bc 1967
3d799a95
DJ
1968 ourstatus->kind = TARGET_WAITKIND_EXECD;
1969 ourstatus->value.execd_pathname
8dd27370 1970 = xstrdup (linux_child_pid_to_exec_file (NULL, pid));
3d799a95 1971
8af756ef
PA
1972 /* The thread that execed must have been resumed, but, when a
1973 thread execs, it changes its tid to the tgid, and the old
1974 tgid thread might not have been resumed. */
1975 lp->resumed = 1;
6c95b8df
PA
1976 return 0;
1977 }
1978
1979 if (event == PTRACE_EVENT_VFORK_DONE)
1980 {
1981 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 1982 {
6c95b8df 1983 if (debug_linux_nat)
3e43a32a
MS
1984 fprintf_unfiltered (gdb_stdlog,
1985 "LHEW: Got expected PTRACE_EVENT_"
1986 "VFORK_DONE from LWP %ld: stopping\n",
dfd4cc63 1987 ptid_get_lwp (lp->ptid));
3d799a95 1988
6c95b8df
PA
1989 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
1990 return 0;
3d799a95
DJ
1991 }
1992
6c95b8df 1993 if (debug_linux_nat)
3e43a32a
MS
1994 fprintf_unfiltered (gdb_stdlog,
1995 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
20ba1ce6 1996 "from LWP %ld: ignoring\n",
dfd4cc63 1997 ptid_get_lwp (lp->ptid));
6c95b8df 1998 return 1;
3d799a95
DJ
1999 }
2000
2001 internal_error (__FILE__, __LINE__,
2002 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2003}
2004
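/* Illustrative sketch (added for exposition, not part of GDB): for
   the fork/vfork/clone stops handled above, the extended event code
   is carried in bits 16..23 of the wait status, and the new child's
   id is fetched with PTRACE_GETEVENTMSG.  The helper name is
   hypothetical.  */

#include <sys/ptrace.h>
#include <sys/types.h>

static long
example_new_child_of_event (pid_t pid, int status)
{
  unsigned long new_pid = 0;
  int event = status >> 16;

  if (event == PTRACE_EVENT_FORK
      || event == PTRACE_EVENT_VFORK
      || event == PTRACE_EVENT_CLONE)
    {
      if (ptrace (PTRACE_GETEVENTMSG, pid, (void *) 0, &new_pid) != 0)
        return -1;
      return (long) new_pid;
    }

  return 0;			/* Not a new-child event.  */
}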
2005/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2006 exited. */
2007
2008static int
2009wait_lwp (struct lwp_info *lp)
2010{
2011 pid_t pid;
432b4d03 2012 int status = 0;
d6b0e80f 2013 int thread_dead = 0;
432b4d03 2014 sigset_t prev_mask;
d6b0e80f
AC
2015
2016 gdb_assert (!lp->stopped);
2017 gdb_assert (lp->status == 0);
2018
432b4d03
JK
2019 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2020 block_child_signals (&prev_mask);
2021
2022 for (;;)
d6b0e80f 2023 {
4a6ed09b 2024 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, __WALL | WNOHANG);
a9f4bb21
PA
2025 if (pid == -1 && errno == ECHILD)
2026 {
2027 /* The thread has previously exited. We need to delete it
4a6ed09b
PA
2028 now because if this was a non-leader thread execing, we
2029 won't get an exit event. See comments on exec events at
2030 the top of the file. */
a9f4bb21
PA
2031 thread_dead = 1;
2032 if (debug_linux_nat)
2033 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2034 target_pid_to_str (lp->ptid));
2035 }
432b4d03
JK
2036 if (pid != 0)
2037 break;
2038
2039 /* Bugs 10970, 12702.
2040 Thread group leader may have exited in which case we'll lock up in
2041 waitpid if there are other threads, even if they are all zombies too.
2042 Basically, we're not supposed to use waitpid this way.
4a6ed09b
PA
2043 tkill(pid,0) cannot be used here as it gets ESRCH
2044 for both zombie and running processes.
432b4d03
JK
2045
2046 As a workaround, check if we're waiting for the thread group leader and
2047 if it's a zombie, and avoid calling waitpid if it is.
2048
2049 This is racy, what if the tgl becomes a zombie right after we check?
2050 Therefore always use WNOHANG with sigsuspend - it is equivalent to
5f572dec 2051 a blocking waitpid, but linux_proc_pid_is_zombie is safe this way. */
432b4d03 2052
dfd4cc63
LM
2053 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid)
2054 && linux_proc_pid_is_zombie (ptid_get_lwp (lp->ptid)))
d6b0e80f 2055 {
d6b0e80f
AC
2056 thread_dead = 1;
2057 if (debug_linux_nat)
432b4d03
JK
2058 fprintf_unfiltered (gdb_stdlog,
2059 "WL: Thread group leader %s vanished.\n",
d6b0e80f 2060 target_pid_to_str (lp->ptid));
432b4d03 2061 break;
d6b0e80f 2062 }
432b4d03
JK
2063
2064 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
2065 get invoked even though our caller had them intentionally blocked by
2066 block_child_signals. This matters only for the loop in
2067 linux_nat_wait_1: if we are called from there, my_waitpid is called
2068 again before it reaches sigsuspend, so we can safely let the handlers
2069 get executed here. */
2070
d36bf488
DE
2071 if (debug_linux_nat)
2072 fprintf_unfiltered (gdb_stdlog, "WL: about to sigsuspend\n");
432b4d03
JK
2073 sigsuspend (&suspend_mask);
2074 }
2075
2076 restore_child_signals_mask (&prev_mask);
2077
d6b0e80f
AC
2078 if (!thread_dead)
2079 {
dfd4cc63 2080 gdb_assert (pid == ptid_get_lwp (lp->ptid));
d6b0e80f
AC
2081
2082 if (debug_linux_nat)
2083 {
2084 fprintf_unfiltered (gdb_stdlog,
2085 "WL: waitpid %s received %s\n",
2086 target_pid_to_str (lp->ptid),
2087 status_to_str (status));
2088 }
d6b0e80f 2089
a9f4bb21
PA
2090 /* Check if the thread has exited. */
2091 if (WIFEXITED (status) || WIFSIGNALED (status))
2092 {
69dde7dc
PA
2093 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
2094 {
2095 if (debug_linux_nat)
2096 fprintf_unfiltered (gdb_stdlog, "WL: Process %d exited.\n",
2097 ptid_get_pid (lp->ptid));
2098
2099 /* This is the leader exiting, it means the whole
2100 process is gone. Store the status to report to the
2101 core. Store it in lp->waitstatus, because lp->status
2102 would be ambiguous (W_EXITCODE(0,0) == 0). */
2103 store_waitstatus (&lp->waitstatus, status);
2104 return 0;
2105 }
2106
a9f4bb21
PA
2107 thread_dead = 1;
2108 if (debug_linux_nat)
2109 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2110 target_pid_to_str (lp->ptid));
2111 }
d6b0e80f
AC
2112 }
2113
2114 if (thread_dead)
2115 {
e26af52f 2116 exit_lwp (lp);
d6b0e80f
AC
2117 return 0;
2118 }
2119
2120 gdb_assert (WIFSTOPPED (status));
8817a6f2 2121 lp->stopped = 1;
d6b0e80f 2122
8784d563
PA
2123 if (lp->must_set_ptrace_flags)
2124 {
2125 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
de0d863e 2126 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 2127
de0d863e 2128 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), options);
8784d563
PA
2129 lp->must_set_ptrace_flags = 0;
2130 }
2131
ca2163eb
PA
2132 /* Handle GNU/Linux's syscall SIGTRAPs. */
2133 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2134 {
2135 /* No longer need the sysgood bit. The ptrace event ends up
2136 recorded in lp->waitstatus if we care for it. We can carry
2137 on handling the event like a regular SIGTRAP from here
2138 on. */
2139 status = W_STOPCODE (SIGTRAP);
2140 if (linux_handle_syscall_trap (lp, 1))
2141 return wait_lwp (lp);
2142 }
bfd09d20
JS
2143 else
2144 {
2145 /* Almost all other ptrace-stops are known to be outside of system
2146 calls, with further exceptions in linux_handle_extended_wait. */
2147 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2148 }
ca2163eb 2149
d6b0e80f 2150 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2151 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2152 && linux_is_extended_waitstatus (status))
d6b0e80f
AC
2153 {
2154 if (debug_linux_nat)
2155 fprintf_unfiltered (gdb_stdlog,
2156 "WL: Handling extended status 0x%06x\n",
2157 status);
4dd63d48 2158 linux_handle_extended_wait (lp, status);
20ba1ce6 2159 return 0;
d6b0e80f
AC
2160 }
2161
2162 return status;
2163}
2164
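/* Illustrative sketch (added for exposition, not GDB's my_waitpid):
   polling a single LWP without blocking.  __WALL waits for both
   clone and non-clone children; WNOHANG makes waitpid return 0
   immediately when the LWP has nothing to report yet, which is what
   lets wait_lwp fall back to sigsuspend instead of blocking in
   waitpid.  */

#include <errno.h>
#include <sys/wait.h>

static pid_t
example_poll_one_lwp (pid_t lwpid, int *statusp)
{
  pid_t ret;

  do
    ret = waitpid (lwpid, statusp, __WALL | WNOHANG);
  while (ret == -1 && errno == EINTR);

  return ret;	/* 0: no event yet; LWPID: *statusp filled; -1: error.  */
}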
2165/* Send a SIGSTOP to LP. */
2166
2167static int
2168stop_callback (struct lwp_info *lp, void *data)
2169{
2170 if (!lp->stopped && !lp->signalled)
2171 {
2172 int ret;
2173
2174 if (debug_linux_nat)
2175 {
2176 fprintf_unfiltered (gdb_stdlog,
2177 "SC: kill %s **<SIGSTOP>**\n",
2178 target_pid_to_str (lp->ptid));
2179 }
2180 errno = 0;
dfd4cc63 2181 ret = kill_lwp (ptid_get_lwp (lp->ptid), SIGSTOP);
d6b0e80f
AC
2182 if (debug_linux_nat)
2183 {
2184 fprintf_unfiltered (gdb_stdlog,
2185 "SC: lwp kill %d %s\n",
2186 ret,
2187 errno ? safe_strerror (errno) : "ERRNO-OK");
2188 }
2189
2190 lp->signalled = 1;
2191 gdb_assert (lp->status == 0);
2192 }
2193
2194 return 0;
2195}
2196
7b50312a
PA
2197/* Request a stop on LWP. */
2198
2199void
2200linux_stop_lwp (struct lwp_info *lwp)
2201{
2202 stop_callback (lwp, NULL);
2203}
2204
2db9a427
PA
2205/* See linux-nat.h */
2206
2207void
2208linux_stop_and_wait_all_lwps (void)
2209{
2210 /* Stop all LWP's ... */
2211 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
2212
2213 /* ... and wait until all of them have reported back that
2214 they're no longer running. */
2215 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
2216}
2217
2218/* See linux-nat.h */
2219
2220void
2221linux_unstop_all_lwps (void)
2222{
2223 iterate_over_lwps (minus_one_ptid,
2224 resume_stopped_resumed_lwps, &minus_one_ptid);
2225}
2226
57380f4e 2227/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2228
2229static int
57380f4e
DJ
2230linux_nat_has_pending_sigint (int pid)
2231{
2232 sigset_t pending, blocked, ignored;
57380f4e
DJ
2233
2234 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2235
2236 if (sigismember (&pending, SIGINT)
2237 && !sigismember (&ignored, SIGINT))
2238 return 1;
2239
2240 return 0;
2241}
2242
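/* Illustrative sketch (added for exposition, not GDB's
   linux_proc_pending_signals): per-thread signal masks can be read
   from /proc/<pid>/status, where SigPnd and SigIgn are hexadecimal
   bit masks with bit (SIG - 1) standing for signal SIG.  */

#include <signal.h>
#include <stdio.h>

static int
example_has_pending_sigint (int pid)
{
  char path[64], line[256];
  unsigned long long pending = 0, ignored = 0;
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;
  while (fgets (line, sizeof (line), f) != NULL)
    {
      sscanf (line, "SigPnd: %llx", &pending);
      sscanf (line, "SigIgn: %llx", &ignored);
    }
  fclose (f);

  return (pending & (1ULL << (SIGINT - 1))) != 0
	 && (ignored & (1ULL << (SIGINT - 1))) == 0;
}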
2243/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2244
2245static int
2246set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2247{
57380f4e
DJ
2248 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2249 flag to consume the next one. */
2250 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2251 && WSTOPSIG (lp->status) == SIGINT)
2252 lp->status = 0;
2253 else
2254 lp->ignore_sigint = 1;
2255
2256 return 0;
2257}
2258
2259/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2260 This function is called after we know the LWP has stopped; if the LWP
2261 stopped before the expected SIGINT was delivered, then it will never have
2262 arrived. Also, if the signal was delivered to a shared queue and consumed
2263 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2264
57380f4e
DJ
2265static void
2266maybe_clear_ignore_sigint (struct lwp_info *lp)
2267{
2268 if (!lp->ignore_sigint)
2269 return;
2270
dfd4cc63 2271 if (!linux_nat_has_pending_sigint (ptid_get_lwp (lp->ptid)))
57380f4e
DJ
2272 {
2273 if (debug_linux_nat)
2274 fprintf_unfiltered (gdb_stdlog,
2275 "MCIS: Clearing bogus flag for %s\n",
2276 target_pid_to_str (lp->ptid));
2277 lp->ignore_sigint = 0;
2278 }
2279}
2280
ebec9a0f
PA
2281/* Fetch the possible triggered data watchpoint info and store it in
2282 LP.
2283
2284 On some archs, like x86, that use debug registers to set
2285 watchpoints, it's possible that the way to know which watched
2286 address trapped, is to check the register that is used to select
2287 which address to watch. Problem is, between setting the watchpoint
2288 and reading back which data address trapped, the user may change
2289 the set of watchpoints, and, as a consequence, GDB changes the
2290 debug registers in the inferior. To avoid reading back a stale
2291 stopped-data-address when that happens, we cache in LP the fact
2292 that a watchpoint trapped, and the corresponding data address, as
2293 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2294 registers meanwhile, we have the cached data we can rely on. */
2295
9c02b525
PA
2296static int
2297check_stopped_by_watchpoint (struct lwp_info *lp)
ebec9a0f
PA
2298{
2299 struct cleanup *old_chain;
2300
2301 if (linux_ops->to_stopped_by_watchpoint == NULL)
9c02b525 2302 return 0;
ebec9a0f
PA
2303
2304 old_chain = save_inferior_ptid ();
2305 inferior_ptid = lp->ptid;
2306
9c02b525 2307 if (linux_ops->to_stopped_by_watchpoint (linux_ops))
ebec9a0f 2308 {
15c66dd6 2309 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
9c02b525 2310
ebec9a0f
PA
2311 if (linux_ops->to_stopped_data_address != NULL)
2312 lp->stopped_data_address_p =
2313 linux_ops->to_stopped_data_address (&current_target,
2314 &lp->stopped_data_address);
2315 else
2316 lp->stopped_data_address_p = 0;
2317 }
2318
2319 do_cleanups (old_chain);
9c02b525 2320
15c66dd6 2321 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
9c02b525
PA
2322}
2323
2324/* Called when the LWP stopped for a trap that could be explained by a
2325 watchpoint or a breakpoint. */
2326
2327static void
2328save_sigtrap (struct lwp_info *lp)
2329{
15c66dd6 2330 gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
9c02b525
PA
2331 gdb_assert (lp->status != 0);
2332
faf09f01
PA
2333 /* Check first if this was a SW/HW breakpoint before checking
2334 watchpoints, because at least s390 can't tell the data address of
2335 hardware watchpoint hits, and the kernel returns
2336 stopped-by-watchpoint as long as there's a watchpoint set. */
9c02b525
PA
2337 if (linux_nat_status_is_event (lp->status))
2338 check_stopped_by_breakpoint (lp);
faf09f01
PA
2339
2340 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2341 or hardware watchpoint. Check which is which if we got
2342 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2343 if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON
2344 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
2345 check_stopped_by_watchpoint (lp);
ebec9a0f
PA
2346}
2347
9c02b525 2348/* Returns true if the LWP had stopped for a watchpoint. */
ebec9a0f
PA
2349
2350static int
6a109b6b 2351linux_nat_stopped_by_watchpoint (struct target_ops *ops)
ebec9a0f
PA
2352{
2353 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2354
2355 gdb_assert (lp != NULL);
2356
15c66dd6 2357 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
ebec9a0f
PA
2358}
2359
2360static int
2361linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2362{
2363 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2364
2365 gdb_assert (lp != NULL);
2366
2367 *addr_p = lp->stopped_data_address;
2368
2369 return lp->stopped_data_address_p;
2370}
2371
26ab7092
JK
2372 /* Commonly any breakpoint / watchpoint generates only SIGTRAP. */
2373
2374static int
2375sigtrap_is_event (int status)
2376{
2377 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2378}
2379
26ab7092
JK
2380 /* Set an alternative recognizer for SIGTRAP-like events. If
2381 breakpoint_inserted_here_p reports a breakpoint at the stop address,
2382 then gdbarch_decr_pc_after_break will be applied. */
2383
2384void
2385linux_nat_set_status_is_event (struct target_ops *t,
2386 int (*status_is_event) (int status))
2387{
2388 linux_nat_status_is_event = status_is_event;
2389}
2390
57380f4e
DJ
2391/* Wait until LP is stopped. */
2392
2393static int
2394stop_wait_callback (struct lwp_info *lp, void *data)
2395{
c9657e70 2396 struct inferior *inf = find_inferior_ptid (lp->ptid);
6c95b8df
PA
2397
2398 /* If this is a vfork parent, bail out, it is not going to report
2399 any SIGSTOP until the vfork is done with. */
2400 if (inf->vfork_child != NULL)
2401 return 0;
2402
d6b0e80f
AC
2403 if (!lp->stopped)
2404 {
2405 int status;
2406
2407 status = wait_lwp (lp);
2408 if (status == 0)
2409 return 0;
2410
57380f4e
DJ
2411 if (lp->ignore_sigint && WIFSTOPPED (status)
2412 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2413 {
57380f4e 2414 lp->ignore_sigint = 0;
d6b0e80f
AC
2415
2416 errno = 0;
dfd4cc63 2417 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
8817a6f2 2418 lp->stopped = 0;
d6b0e80f
AC
2419 if (debug_linux_nat)
2420 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2421 "PTRACE_CONT %s, 0, 0 (%s) "
2422 "(discarding SIGINT)\n",
d6b0e80f
AC
2423 target_pid_to_str (lp->ptid),
2424 errno ? safe_strerror (errno) : "OK");
2425
57380f4e 2426 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2427 }
2428
57380f4e
DJ
2429 maybe_clear_ignore_sigint (lp);
2430
d6b0e80f
AC
2431 if (WSTOPSIG (status) != SIGSTOP)
2432 {
e5ef252a 2433 /* The thread was stopped with a signal other than SIGSTOP. */
7feb7d06 2434
e5ef252a
PA
2435 if (debug_linux_nat)
2436 fprintf_unfiltered (gdb_stdlog,
2437 "SWC: Pending event %s in %s\n",
2438 status_to_str ((int) status),
2439 target_pid_to_str (lp->ptid));
2440
2441 /* Save the sigtrap event. */
2442 lp->status = status;
e5ef252a 2443 gdb_assert (lp->signalled);
9c02b525 2444 save_sigtrap (lp);
d6b0e80f
AC
2445 }
2446 else
2447 {
2448 /* We caught the SIGSTOP that we intended to catch, so
2449 there's no SIGSTOP pending. */
e5ef252a
PA
2450
2451 if (debug_linux_nat)
2452 fprintf_unfiltered (gdb_stdlog,
2bf6fb9d 2453 "SWC: Expected SIGSTOP caught for %s.\n",
e5ef252a
PA
2454 target_pid_to_str (lp->ptid));
2455
e5ef252a
PA
2456 /* Reset SIGNALLED only after the stop_wait_callback call
2457 above as it does gdb_assert on SIGNALLED. */
d6b0e80f
AC
2458 lp->signalled = 0;
2459 }
2460 }
2461
2462 return 0;
2463}
2464
9c02b525
PA
2465/* Return non-zero if LP has a wait status pending. Discard the
2466 pending event and resume the LWP if the event that originally
2467 caused the stop became uninteresting. */
d6b0e80f
AC
2468
2469static int
2470status_callback (struct lwp_info *lp, void *data)
2471{
2472 /* Only report a pending wait status if we pretend that this has
2473 indeed been resumed. */
ca2163eb
PA
2474 if (!lp->resumed)
2475 return 0;
2476
eb54c8bf
PA
2477 if (!lwp_status_pending_p (lp))
2478 return 0;
2479
15c66dd6
PA
2480 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2481 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525
PA
2482 {
2483 struct regcache *regcache = get_thread_regcache (lp->ptid);
2484 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2485 CORE_ADDR pc;
2486 int discard = 0;
2487
9c02b525
PA
2488 pc = regcache_read_pc (regcache);
2489
2490 if (pc != lp->stop_pc)
2491 {
2492 if (debug_linux_nat)
2493 fprintf_unfiltered (gdb_stdlog,
2494 "SC: PC of %s changed. was=%s, now=%s\n",
2495 target_pid_to_str (lp->ptid),
2496 paddress (target_gdbarch (), lp->stop_pc),
2497 paddress (target_gdbarch (), pc));
2498 discard = 1;
2499 }
faf09f01
PA
2500
2501#if !USE_SIGTRAP_SIGINFO
9c02b525
PA
2502 else if (!breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2503 {
2504 if (debug_linux_nat)
2505 fprintf_unfiltered (gdb_stdlog,
2506 "SC: previous breakpoint of %s, at %s gone\n",
2507 target_pid_to_str (lp->ptid),
2508 paddress (target_gdbarch (), lp->stop_pc));
2509
2510 discard = 1;
2511 }
faf09f01 2512#endif
9c02b525
PA
2513
2514 if (discard)
2515 {
2516 if (debug_linux_nat)
2517 fprintf_unfiltered (gdb_stdlog,
2518 "SC: pending event of %s cancelled.\n",
2519 target_pid_to_str (lp->ptid));
2520
2521 lp->status = 0;
2522 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2523 return 0;
2524 }
9c02b525
PA
2525 }
2526
eb54c8bf 2527 return 1;
d6b0e80f
AC
2528}
2529
d6b0e80f
AC
2530/* Count the LWP's that have had events. */
2531
2532static int
2533count_events_callback (struct lwp_info *lp, void *data)
2534{
9a3c8263 2535 int *count = (int *) data;
d6b0e80f
AC
2536
2537 gdb_assert (count != NULL);
2538
9c02b525
PA
2539 /* Select only resumed LWPs that have an event pending. */
2540 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2541 (*count)++;
2542
2543 return 0;
2544}
2545
2546/* Select the LWP (if any) that is currently being single-stepped. */
2547
2548static int
2549select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2550{
25289eb2
PA
2551 if (lp->last_resume_kind == resume_step
2552 && lp->status != 0)
d6b0e80f
AC
2553 return 1;
2554 else
2555 return 0;
2556}
2557
8a99810d
PA
2558/* Returns true if LP has a status pending. */
2559
2560static int
2561lwp_status_pending_p (struct lwp_info *lp)
2562{
2563 /* We check for lp->waitstatus in addition to lp->status, because we
2564 can have pending process exits recorded in lp->status and
2565 W_EXITCODE(0,0) happens to be 0. */
2566 return lp->status != 0 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE;
2567}
2568
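/* Illustrative demonstration (added for exposition): why lp->status
   alone cannot record process exits.  A child that calls exit(0)
   yields a raw wait status of 0 -- the same value as "no status
   pending" -- so exit events must live in lp->waitstatus instead.  */

#include <assert.h>
#include <sys/wait.h>

static void
example_zero_status_is_ambiguous (void)
{
  int status = 0;		/* Same bit pattern as W_EXITCODE (0, 0).  */

  assert (WIFEXITED (status));		/* Looks like a normal exit...  */
  assert (WEXITSTATUS (status) == 0);	/* ... with exit code 0.  */
}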
b90fc188 2569/* Select the Nth LWP that has had an event. */
d6b0e80f
AC
2570
2571static int
2572select_event_lwp_callback (struct lwp_info *lp, void *data)
2573{
9a3c8263 2574 int *selector = (int *) data;
d6b0e80f
AC
2575
2576 gdb_assert (selector != NULL);
2577
9c02b525
PA
2578 /* Select only resumed LWPs that have an event pending. */
2579 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2580 if ((*selector)-- == 0)
2581 return 1;
2582
2583 return 0;
2584}
2585
9c02b525
PA
2586/* Called when the LWP got a signal/trap that could be explained by a
2587 software or hardware breakpoint. */
2588
710151dd 2589static int
9c02b525 2590check_stopped_by_breakpoint (struct lwp_info *lp)
710151dd
PA
2591{
2592 /* Arrange for a breakpoint to be hit again later. We don't keep
2593 the SIGTRAP status and don't forward the SIGTRAP signal to the
2594 LWP. We will handle the current event, eventually we will resume
2595 this LWP, and this breakpoint will trap again.
2596
2597 If we do not do this, then we run the risk that the user will
2598 delete or disable the breakpoint, but the LWP will have already
2599 tripped on it. */
2600
515630c5
UW
2601 struct regcache *regcache = get_thread_regcache (lp->ptid);
2602 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2603 CORE_ADDR pc;
9c02b525 2604 CORE_ADDR sw_bp_pc;
faf09f01
PA
2605#if USE_SIGTRAP_SIGINFO
2606 siginfo_t siginfo;
2607#endif
9c02b525
PA
2608
2609 pc = regcache_read_pc (regcache);
527a273a 2610 sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);
515630c5 2611
faf09f01
PA
2612#if USE_SIGTRAP_SIGINFO
2613 if (linux_nat_get_siginfo (lp->ptid, &siginfo))
2614 {
2615 if (siginfo.si_signo == SIGTRAP)
2616 {
1db33b5a 2617 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
faf09f01
PA
2618 {
2619 if (debug_linux_nat)
2620 fprintf_unfiltered (gdb_stdlog,
2bf6fb9d
PA
2621 "CSBB: %s stopped by software "
2622 "breakpoint\n",
faf09f01
PA
2623 target_pid_to_str (lp->ptid));
2624
2625 /* Back up the PC if necessary. */
2626 if (pc != sw_bp_pc)
2627 regcache_write_pc (regcache, sw_bp_pc);
2628
2629 lp->stop_pc = sw_bp_pc;
2630 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2631 return 1;
2632 }
2633 else if (siginfo.si_code == TRAP_HWBKPT)
2634 {
2635 if (debug_linux_nat)
2636 fprintf_unfiltered (gdb_stdlog,
2bf6fb9d
PA
2637 "CSBB: %s stopped by hardware "
2638 "breakpoint/watchpoint\n",
faf09f01
PA
2639 target_pid_to_str (lp->ptid));
2640
2641 lp->stop_pc = pc;
2642 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
2643 return 1;
2644 }
2bf6fb9d
PA
2645 else if (siginfo.si_code == TRAP_TRACE)
2646 {
2647 if (debug_linux_nat)
2648 fprintf_unfiltered (gdb_stdlog,
2649 "CSBB: %s stopped by trace\n",
2650 target_pid_to_str (lp->ptid));
2651 }
faf09f01
PA
2652 }
2653 }
2654#else
9c02b525
PA
2655 if ((!lp->step || lp->stop_pc == sw_bp_pc)
2656 && software_breakpoint_inserted_here_p (get_regcache_aspace (regcache),
2657 sw_bp_pc))
710151dd 2658 {
9c02b525
PA
2659 /* The LWP was either continued, or stepped a software
2660 breakpoint instruction. */
710151dd
PA
2661 if (debug_linux_nat)
2662 fprintf_unfiltered (gdb_stdlog,
2bf6fb9d 2663 "CSBB: %s stopped by software breakpoint\n",
710151dd
PA
2664 target_pid_to_str (lp->ptid));
2665
2666 /* Back up the PC if necessary. */
9c02b525
PA
2667 if (pc != sw_bp_pc)
2668 regcache_write_pc (regcache, sw_bp_pc);
515630c5 2669
9c02b525 2670 lp->stop_pc = sw_bp_pc;
15c66dd6 2671 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
710151dd
PA
2672 return 1;
2673 }
710151dd 2674
9c02b525
PA
2675 if (hardware_breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2676 {
2677 if (debug_linux_nat)
2678 fprintf_unfiltered (gdb_stdlog,
2bf6fb9d 2679 "CSBB: stopped by hardware breakpoint %s\n",
9c02b525 2680 target_pid_to_str (lp->ptid));
d6b0e80f 2681
9c02b525 2682 lp->stop_pc = pc;
15c66dd6 2683 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
9c02b525
PA
2684 return 1;
2685 }
faf09f01 2686#endif
d6b0e80f
AC
2687
2688 return 0;
2689}
2690
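/* Illustrative sketch (added for exposition): the "back up the PC"
   step above exists because on some architectures (x86 being the
   usual example) the trap for a software breakpoint reports a PC just
   past the breakpoint instruction, so the breakpoint's address is the
   reported PC minus gdbarch_decr_pc_after_break (1 on x86, 0 on
   architectures that report the breakpoint address itself).  */

static unsigned long
example_breakpoint_address (unsigned long reported_pc, int decr_pc)
{
  return reported_pc - decr_pc;
}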
faf09f01
PA
2691
2692/* Returns true if the LWP had stopped for a software breakpoint. */
2693
2694static int
2695linux_nat_stopped_by_sw_breakpoint (struct target_ops *ops)
2696{
2697 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2698
2699 gdb_assert (lp != NULL);
2700
2701 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2702}
2703
2704/* Implement the supports_stopped_by_sw_breakpoint method. */
2705
2706static int
2707linux_nat_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2708{
2709 return USE_SIGTRAP_SIGINFO;
2710}
2711
2712/* Returns true if the LWP had stopped for a hardware
2713 breakpoint/watchpoint. */
2714
2715static int
2716linux_nat_stopped_by_hw_breakpoint (struct target_ops *ops)
2717{
2718 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2719
2720 gdb_assert (lp != NULL);
2721
2722 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2723}
2724
2725/* Implement the supports_stopped_by_hw_breakpoint method. */
2726
2727static int
2728linux_nat_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2729{
2730 return USE_SIGTRAP_SIGINFO;
2731}
2732
d6b0e80f
AC
2733/* Select one LWP out of those that have events pending. */
2734
2735static void
d90e17a7 2736select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2737{
2738 int num_events = 0;
2739 int random_selector;
9c02b525 2740 struct lwp_info *event_lp = NULL;
d6b0e80f 2741
ac264b3b 2742 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2743 (*orig_lp)->status = *status;
2744
9c02b525
PA
2745 /* In all-stop, give preference to the LWP that is being
2746 single-stepped. There will be at most one, and it will be the
2747 LWP that the core is most interested in. If we didn't do this,
2748 then we'd have to handle pending step SIGTRAPs somehow in case
2749 the core later continues the previously-stepped thread, as
2750 otherwise we'd report the pending SIGTRAP then, and the core, not
2751 having stepped the thread, wouldn't understand what the trap was
2752 for, and therefore would report it to the user as a random
2753 signal. */
fbea99ea 2754 if (!target_is_non_stop_p ())
d6b0e80f 2755 {
9c02b525
PA
2756 event_lp = iterate_over_lwps (filter,
2757 select_singlestep_lwp_callback, NULL);
2758 if (event_lp != NULL)
2759 {
2760 if (debug_linux_nat)
2761 fprintf_unfiltered (gdb_stdlog,
2762 "SEL: Select single-step %s\n",
2763 target_pid_to_str (event_lp->ptid));
2764 }
d6b0e80f 2765 }
9c02b525
PA
2766
2767 if (event_lp == NULL)
d6b0e80f 2768 {
9c02b525 2769 /* Pick one at random, out of those which have had events. */
d6b0e80f 2770
9c02b525 2771 /* First see how many events we have. */
d90e17a7 2772 iterate_over_lwps (filter, count_events_callback, &num_events);
8bf3b159 2773 gdb_assert (num_events > 0);
d6b0e80f 2774
9c02b525
PA
2775 /* Now randomly pick a LWP out of those that have had
2776 events. */
d6b0e80f
AC
2777 random_selector = (int)
2778 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2779
2780 if (debug_linux_nat && num_events > 1)
2781 fprintf_unfiltered (gdb_stdlog,
9c02b525 2782 "SEL: Found %d events, selecting #%d\n",
d6b0e80f
AC
2783 num_events, random_selector);
2784
d90e17a7
PA
2785 event_lp = iterate_over_lwps (filter,
2786 select_event_lwp_callback,
d6b0e80f
AC
2787 &random_selector);
2788 }
2789
2790 if (event_lp != NULL)
2791 {
2792 /* Switch the event LWP. */
2793 *orig_lp = event_lp;
2794 *status = event_lp->status;
2795 }
2796
2797 /* Flush the wait status for the event LWP. */
2798 (*orig_lp)->status = 0;
2799}
2800
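/* Illustrative sketch (added for exposition): the random_selector
   computed above maps rand() into [0, num_events - 1] with equal
   probability for each index, avoiding the slight bias that
   "rand () % num_events" would introduce.  */

#include <stdlib.h>

static int
example_uniform_index (int num_events)
{
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}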
2801/* Return non-zero if LP has been resumed. */
2802
2803static int
2804resumed_callback (struct lwp_info *lp, void *data)
2805{
2806 return lp->resumed;
2807}
2808
02f3fc28 2809/* Check if we should go on and pass this event to common code.
9c02b525 2810 Return the affected lwp if we are, or NULL otherwise. */
12d9289a 2811
02f3fc28 2812static struct lwp_info *
9c02b525 2813linux_nat_filter_event (int lwpid, int status)
02f3fc28
PA
2814{
2815 struct lwp_info *lp;
89a5711c 2816 int event = linux_ptrace_get_extended_event (status);
02f3fc28
PA
2817
2818 lp = find_lwp_pid (pid_to_ptid (lwpid));
2819
2820 /* Check for stop events reported by a process we didn't already
2821 know about - anything not already in our LWP list.
2822
2823 If we're expecting to receive stopped processes after
2824 fork, vfork, and clone events, then we'll just add the
2825 new one to our list and go back to waiting for the event
2826 to be reported - the stopped process might be returned
0e5bf2a8
PA
2827 from waitpid before or after the event is.
2828
2829 But note the case of a non-leader thread exec'ing after the
2830 leader has exited and gone from our lists. The non-leader
2831 thread changes its tid to the tgid. */
2832
2833 if (WIFSTOPPED (status) && lp == NULL
89a5711c 2834 && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
0e5bf2a8
PA
2835 {
2836 /* A multi-thread exec after we had seen the leader exiting. */
2837 if (debug_linux_nat)
2838 fprintf_unfiltered (gdb_stdlog,
2839 "LLW: Re-adding thread group leader LWP %d.\n",
2840 lwpid);
2841
dfd4cc63 2842 lp = add_lwp (ptid_build (lwpid, lwpid, 0));
0e5bf2a8
PA
2843 lp->stopped = 1;
2844 lp->resumed = 1;
2845 add_thread (lp->ptid);
2846 }
2847
02f3fc28
PA
2848 if (WIFSTOPPED (status) && !lp)
2849 {
3b27ef47
PA
2850 if (debug_linux_nat)
2851 fprintf_unfiltered (gdb_stdlog,
2852 "LHEW: saving LWP %ld status %s in stopped_pids list\n",
2853 (long) lwpid, status_to_str (status));
84636d28 2854 add_to_pid_list (&stopped_pids, lwpid, status);
02f3fc28
PA
2855 return NULL;
2856 }
2857
2858 /* Make sure we don't report an event for the exit of an LWP not in
1777feb0 2859 our list, i.e. not part of the current process. This can happen
fd62cb89 2860 if we detach from a program we originally forked and then it
02f3fc28
PA
2861 exits. */
2862 if (!WIFSTOPPED (status) && !lp)
2863 return NULL;
2864
8817a6f2
PA
2865 /* This LWP is stopped now. (And if dead, this prevents it from
2866 ever being continued.) */
2867 lp->stopped = 1;
2868
8784d563
PA
2869 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
2870 {
2871 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
de0d863e 2872 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 2873
de0d863e 2874 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), options);
8784d563
PA
2875 lp->must_set_ptrace_flags = 0;
2876 }
2877
ca2163eb
PA
2878 /* Handle GNU/Linux's syscall SIGTRAPs. */
2879 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2880 {
2881 /* No longer need the sysgood bit. The ptrace event ends up
2882 recorded in lp->waitstatus if we care for it. We can carry
2883 on handling the event like a regular SIGTRAP from here
2884 on. */
2885 status = W_STOPCODE (SIGTRAP);
2886 if (linux_handle_syscall_trap (lp, 0))
2887 return NULL;
2888 }
bfd09d20
JS
2889 else
2890 {
2891 /* Almost all other ptrace-stops are known to be outside of system
2892 calls, with further exceptions in linux_handle_extended_wait. */
2893 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2894 }
02f3fc28 2895
ca2163eb 2896 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2897 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2898 && linux_is_extended_waitstatus (status))
02f3fc28
PA
2899 {
2900 if (debug_linux_nat)
2901 fprintf_unfiltered (gdb_stdlog,
2902 "LLW: Handling extended status 0x%06x\n",
2903 status);
4dd63d48 2904 if (linux_handle_extended_wait (lp, status))
02f3fc28
PA
2905 return NULL;
2906 }
2907
2908 /* Check if the thread has exited. */
9c02b525
PA
2909 if (WIFEXITED (status) || WIFSIGNALED (status))
2910 {
2911 if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
02f3fc28 2912 {
9c02b525
PA
2913 if (debug_linux_nat)
2914 fprintf_unfiltered (gdb_stdlog,
2915 "LLW: %s exited.\n",
2916 target_pid_to_str (lp->ptid));
2917
4a6ed09b
PA
2918 /* If there is at least one more LWP, then the exit signal
2919 was not the end of the debugged application and should be
2920 ignored. */
2921 exit_lwp (lp);
2922 return NULL;
02f3fc28
PA
2923 }
2924
77598427
PA
2925 /* Note that even if the leader was ptrace-stopped, it can still
2926 exit, if e.g., some other thread brings down the whole
2927 process (calls `exit'). So don't assert that the lwp is
2928 resumed. */
02f3fc28
PA
2929 if (debug_linux_nat)
2930 fprintf_unfiltered (gdb_stdlog,
77598427
PA
2931 "Process %ld exited (resumed=%d)\n",
2932 ptid_get_lwp (lp->ptid), lp->resumed);
02f3fc28 2933
9c02b525
PA
2934 /* This was the last lwp in the process. Since events are
2935 serialized to GDB core, we may not be able to report this one
2936 right now, but GDB core and the other target layers will want
2937 to be notified about the exit code/signal, so leave the status
2938 pending for the next time we're able to report it. */
2939
2940 /* Dead LWPs aren't expected to report a pending sigstop. */
2941 lp->signalled = 0;
2942
2943 /* Store the pending event in the waitstatus, because
2944 W_EXITCODE(0,0) == 0. */
2945 store_waitstatus (&lp->waitstatus, status);
2946 return lp;
02f3fc28
PA
2947 }
2948
02f3fc28
PA
2949 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2950 an attempt to stop an LWP. */
2951 if (lp->signalled
2952 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2953 {
02f3fc28
PA
2954 lp->signalled = 0;
2955
2bf6fb9d 2956 if (lp->last_resume_kind == resume_stop)
25289eb2 2957 {
2bf6fb9d
PA
2958 if (debug_linux_nat)
2959 fprintf_unfiltered (gdb_stdlog,
2960 "LLW: resume_stop SIGSTOP caught for %s.\n",
2961 target_pid_to_str (lp->ptid));
2962 }
2963 else
2964 {
2965 /* This is a delayed SIGSTOP. Filter out the event. */
02f3fc28 2966
25289eb2
PA
2967 if (debug_linux_nat)
2968 fprintf_unfiltered (gdb_stdlog,
2bf6fb9d 2969 "LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
25289eb2
PA
2970 lp->step ?
2971 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2972 target_pid_to_str (lp->ptid));
02f3fc28 2973
2bf6fb9d 2974 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
25289eb2 2975 gdb_assert (lp->resumed);
25289eb2
PA
2976 return NULL;
2977 }
02f3fc28
PA
2978 }
2979
57380f4e
DJ
2980 /* Make sure we don't report a SIGINT that we have already displayed
2981 for another thread. */
2982 if (lp->ignore_sigint
2983 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
2984 {
2985 if (debug_linux_nat)
2986 fprintf_unfiltered (gdb_stdlog,
2987 "LLW: Delayed SIGINT caught for %s.\n",
2988 target_pid_to_str (lp->ptid));
2989
2990 /* This is a delayed SIGINT. */
2991 lp->ignore_sigint = 0;
2992
8a99810d 2993 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
57380f4e
DJ
2994 if (debug_linux_nat)
2995 fprintf_unfiltered (gdb_stdlog,
2996 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
2997 lp->step ?
2998 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2999 target_pid_to_str (lp->ptid));
57380f4e
DJ
3000 gdb_assert (lp->resumed);
3001
3002 /* Discard the event. */
3003 return NULL;
3004 }
3005
9c02b525
PA
3006 /* Don't report signals that GDB isn't interested in, such as
3007 signals that are neither printed nor stopped upon. Stopping all
3008 threads can be a bit time-consuming so if we want decent
3009 performance with heavily multi-threaded programs, especially when
3010 they're using a high frequency timer, we'd better avoid it if we
3011 can. */
3012 if (WIFSTOPPED (status))
3013 {
3014 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3015
fbea99ea 3016 if (!target_is_non_stop_p ())
9c02b525
PA
3017 {
3018 /* Only do the below in all-stop, as we currently use SIGSTOP
3019 to implement target_stop (see linux_nat_stop) in
3020 non-stop. */
3021 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3022 {
3023 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3024 forwarded to the entire process group, that is, all LWPs
3025 will receive it - unless they're using CLONE_THREAD to
3026 share signals. Since we only want to report it once, we
3027 mark it as ignored for all LWPs except this one. */
3028 iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
3029 set_ignore_sigint, NULL);
3030 lp->ignore_sigint = 0;
3031 }
3032 else
3033 maybe_clear_ignore_sigint (lp);
3034 }
3035
3036 /* When using hardware single-step, we need to report every signal.
c9587f88
AT
3037 Otherwise, signals in pass_mask may be short-circuited
3038 except signals that might be caused by a breakpoint. */
9c02b525 3039 if (!lp->step
c9587f88
AT
3040 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
3041 && !linux_wstatus_maybe_breakpoint (status))
9c02b525
PA
3042 {
3043 linux_resume_one_lwp (lp, lp->step, signo);
3044 if (debug_linux_nat)
3045 fprintf_unfiltered (gdb_stdlog,
3046 "LLW: %s %s, %s (preempt 'handle')\n",
3047 lp->step ?
3048 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3049 target_pid_to_str (lp->ptid),
3050 (signo != GDB_SIGNAL_0
3051 ? strsignal (gdb_signal_to_host (signo))
3052 : "0"));
3053 return NULL;
3054 }
3055 }
3056
02f3fc28
PA
3057 /* An interesting event. */
3058 gdb_assert (lp);
ca2163eb 3059 lp->status = status;
9c02b525 3060 save_sigtrap (lp);
02f3fc28
PA
3061 return lp;
3062}
3063
0e5bf2a8
PA
3064/* Detect zombie thread group leaders, and "exit" them. We can't reap
3065 their exits until all other threads in the group have exited. */
3066
3067static void
3068check_zombie_leaders (void)
3069{
3070 struct inferior *inf;
3071
3072 ALL_INFERIORS (inf)
3073 {
3074 struct lwp_info *leader_lp;
3075
3076 if (inf->pid == 0)
3077 continue;
3078
3079 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3080 if (leader_lp != NULL
3081 /* Check if there are other threads in the group, as we may
3082 have raced with the inferior simply exiting. */
3083 && num_lwps (inf->pid) > 1
5f572dec 3084 && linux_proc_pid_is_zombie (inf->pid))
0e5bf2a8
PA
3085 {
3086 if (debug_linux_nat)
3087 fprintf_unfiltered (gdb_stdlog,
3088 "CZL: Thread group leader %d zombie "
3089 "(it exited, or another thread execd).\n",
3090 inf->pid);
3091
3092 /* A leader zombie can mean one of two things:
3093
3094 - It exited, and there's an exit status pending
3095 available, or only the leader exited (not the whole
3096 program). In the latter case, we can't waitpid the
3097 leader's exit status until all other threads are gone.
3098
3099 - There are 3 or more threads in the group, and a thread
4a6ed09b
PA
3100 other than the leader exec'd. See comments on exec
3101 events at the top of the file. We could try
0e5bf2a8
PA
3102 distinguishing the exit and exec cases, by waiting once
3103 more, and seeing if something comes out, but it doesn't
3104 sound useful. The previous leader _does_ go away, and
3105 we'll re-add the new one once we see the exec event
3106 (which is just the same as what would happen if the
3107 previous leader did exit voluntarily before some other
3108 thread execs). */
3109
3110 if (debug_linux_nat)
3111 fprintf_unfiltered (gdb_stdlog,
3112 "CZL: Thread group leader %d vanished.\n",
3113 inf->pid);
3114 exit_lwp (leader_lp);
3115 }
3116 }
3117}
3118
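/* Illustrative sketch (added for exposition, not GDB's
   linux_proc_pid_is_zombie): a thread's run state is exposed on the
   "State:" line of /proc/<pid>/status, where 'Z' marks a zombie that
   has exited but has not been reaped yet.  */

#include <stdio.h>

static int
example_pid_is_zombie (int pid)
{
  char path[64], line[256], state = 0;
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;
  while (fgets (line, sizeof (line), f) != NULL)
    if (sscanf (line, "State: %c", &state) == 1)
      break;
  fclose (f);

  return state == 'Z';
}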
d6b0e80f 3119static ptid_t
7feb7d06 3120linux_nat_wait_1 (struct target_ops *ops,
47608cb1
PA
3121 ptid_t ptid, struct target_waitstatus *ourstatus,
3122 int target_options)
d6b0e80f 3123{
fc9b8e47 3124 sigset_t prev_mask;
4b60df3d 3125 enum resume_kind last_resume_kind;
12d9289a 3126 struct lwp_info *lp;
12d9289a 3127 int status;
d6b0e80f 3128
01124a23 3129 if (debug_linux_nat)
b84876c2
PA
3130 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3131
f973ed9c
DJ
3132 /* The first time we get here after starting a new inferior, we may
3133 not have added it to the LWP list yet - this is the earliest
3134 moment at which we know its PID. */
d90e17a7 3135 if (ptid_is_pid (inferior_ptid))
f973ed9c 3136 {
27c9d204
PA
3137 /* Upgrade the main thread's ptid. */
3138 thread_change_ptid (inferior_ptid,
dfd4cc63
LM
3139 ptid_build (ptid_get_pid (inferior_ptid),
3140 ptid_get_pid (inferior_ptid), 0));
27c9d204 3141
26cb8b7c 3142 lp = add_initial_lwp (inferior_ptid);
f973ed9c
DJ
3143 lp->resumed = 1;
3144 }
3145
12696c10 3146 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
7feb7d06 3147 block_child_signals (&prev_mask);
d6b0e80f 3148
d6b0e80f 3149 /* First check if there is a LWP with a wait status pending. */
8a99810d
PA
3150 lp = iterate_over_lwps (ptid, status_callback, NULL);
3151 if (lp != NULL)
d6b0e80f
AC
3152 {
3153 if (debug_linux_nat)
d6b0e80f
AC
3154 fprintf_unfiltered (gdb_stdlog,
3155 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3156 status_to_str (lp->status),
d6b0e80f 3157 target_pid_to_str (lp->ptid));
d6b0e80f
AC
3158 }
3159
9c02b525
PA
3160 /* But if we don't find a pending event, we'll have to wait. Always
3161 pull all events out of the kernel. We'll randomly select an
3162 event LWP out of all that have events, to prevent starvation. */
7feb7d06 3163
d90e17a7 3164 while (lp == NULL)
d6b0e80f
AC
3165 {
3166 pid_t lwpid;
3167
0e5bf2a8
PA
3168 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
3169 quirks:
3170
3171 - If the thread group leader exits while other threads in the
3172 thread group still exist, waitpid(TGID, ...) hangs. That
3173 waitpid won't return an exit status until the other threads
3174 in the group are reaped.
3175
3176 - When a non-leader thread execs, that thread just vanishes
3177 without reporting an exit (so we'd hang if we waited for it
3178 explicitly in that case). The exec event is reported to
3179 the TGID pid. */
3180
3181 errno = 0;
4a6ed09b 3182 lwpid = my_waitpid (-1, &status, __WALL | WNOHANG);
0e5bf2a8
PA
3183
3184 if (debug_linux_nat)
3185 fprintf_unfiltered (gdb_stdlog,
3186 "LNW: waitpid(-1, ...) returned %d, %s\n",
3187 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
b84876c2 3188
d6b0e80f
AC
3189 if (lwpid > 0)
3190 {
d6b0e80f
AC
3191 if (debug_linux_nat)
3192 {
3193 fprintf_unfiltered (gdb_stdlog,
3194 "LLW: waitpid %ld received %s\n",
3195 (long) lwpid, status_to_str (status));
3196 }
3197
9c02b525 3198 linux_nat_filter_event (lwpid, status);
0e5bf2a8
PA
3199 /* Retry until nothing comes out of waitpid. A single
3200 SIGCHLD can indicate more than one child stopped. */
3201 continue;
d6b0e80f
AC
3202 }
3203
20ba1ce6
PA
3204 /* Now that we've pulled all events out of the kernel, resume
3205 LWPs that don't have an interesting event to report. */
3206 iterate_over_lwps (minus_one_ptid,
3207 resume_stopped_resumed_lwps, &minus_one_ptid);
3208
3209 /* ... and find an LWP with a status to report to the core, if
3210 any. */
9c02b525
PA
3211 lp = iterate_over_lwps (ptid, status_callback, NULL);
3212 if (lp != NULL)
3213 break;
3214
0e5bf2a8
PA
3215 /* Check for zombie thread group leaders. Those can't be reaped
3216 until all other threads in the thread group are. */
3217 check_zombie_leaders ();
d6b0e80f 3218
0e5bf2a8
PA
3219 /* If there are no resumed children left, bail. We'd be stuck
3220 forever in the sigsuspend call below otherwise. */
3221 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3222 {
3223 if (debug_linux_nat)
3224 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
b84876c2 3225
0e5bf2a8 3226 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
b84876c2 3227
0e5bf2a8
PA
3228 restore_child_signals_mask (&prev_mask);
3229 return minus_one_ptid;
d6b0e80f 3230 }
28736962 3231
0e5bf2a8
PA
3232 /* No interesting event to report to the core. */
3233
3234 if (target_options & TARGET_WNOHANG)
3235 {
01124a23 3236 if (debug_linux_nat)
28736962
PA
3237 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3238
0e5bf2a8 3239 ourstatus->kind = TARGET_WAITKIND_IGNORE;
28736962
PA
3240 restore_child_signals_mask (&prev_mask);
3241 return minus_one_ptid;
3242 }
d6b0e80f
AC
3243
3244 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3245 gdb_assert (lp == NULL);
0e5bf2a8
PA
3246
3247 /* Block until we get an event reported with SIGCHLD. */
d36bf488
DE
3248 if (debug_linux_nat)
3249 fprintf_unfiltered (gdb_stdlog, "LNW: about to sigsuspend\n");
0e5bf2a8 3250 sigsuspend (&suspend_mask);
d6b0e80f
AC
3251 }
3252
d6b0e80f
AC
3253 gdb_assert (lp);
3254
ca2163eb
PA
3255 status = lp->status;
3256 lp->status = 0;
3257
fbea99ea 3258 if (!target_is_non_stop_p ())
4c28f408
PA
3259 {
3260 /* Now stop all other LWP's ... */
d90e17a7 3261 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
4c28f408
PA
3262
3263 /* ... and wait until all of them have reported back that
3264 they're no longer running. */
d90e17a7 3265 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
9c02b525
PA
3266 }
3267
3268 /* If we're not waiting for a specific LWP, choose an event LWP from
3269 among those that have had events. Giving equal priority to all
3270 LWPs that have had events helps prevent starvation. */
3271 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3272 select_event_lwp (ptid, &lp, &status);
3273
3274 gdb_assert (lp != NULL);
3275
3276 /* Now that we've selected our final event LWP, un-adjust its PC if
faf09f01
PA
3277 it was a software breakpoint, and we can't reliably support the
3278 "stopped by software breakpoint" stop reason. */
3279 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3280 && !USE_SIGTRAP_SIGINFO)
9c02b525
PA
3281 {
3282 struct regcache *regcache = get_thread_regcache (lp->ptid);
3283 struct gdbarch *gdbarch = get_regcache_arch (regcache);
527a273a 3284 int decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4c28f408 3285
9c02b525
PA
3286 if (decr_pc != 0)
3287 {
3288 CORE_ADDR pc;
d6b0e80f 3289
9c02b525
PA
3290 pc = regcache_read_pc (regcache);
3291 regcache_write_pc (regcache, pc + decr_pc);
3292 }
3293 }
e3e9f5a2 3294
9c02b525
PA
3295 /* We'll need this to determine whether to report a SIGSTOP as
3296 GDB_SIGNAL_0. Need to take a copy because resume_clear_callback
3297 clears it. */
3298 last_resume_kind = lp->last_resume_kind;
4b60df3d 3299
fbea99ea 3300 if (!target_is_non_stop_p ())
9c02b525 3301 {
e3e9f5a2
PA
3302 /* In all-stop, from the core's perspective, all LWPs are now
3303 stopped until a new resume action is sent over. */
3304 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3305 }
3306 else
25289eb2 3307 {
4b60df3d 3308 resume_clear_callback (lp, NULL);
25289eb2 3309 }
d6b0e80f 3310
26ab7092 3311 if (linux_nat_status_is_event (status))
d6b0e80f 3312 {
d6b0e80f
AC
3313 if (debug_linux_nat)
3314 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3315 "LLW: trap ptid is %s.\n",
3316 target_pid_to_str (lp->ptid));
d6b0e80f 3317 }
d6b0e80f
AC
3318
3319 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3320 {
3321 *ourstatus = lp->waitstatus;
3322 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3323 }
3324 else
3325 store_waitstatus (ourstatus, status);
3326
01124a23 3327 if (debug_linux_nat)
b84876c2
PA
3328 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3329
7feb7d06 3330 restore_child_signals_mask (&prev_mask);
1e225492 3331
4b60df3d 3332 if (last_resume_kind == resume_stop
25289eb2
PA
3333 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3334 && WSTOPSIG (status) == SIGSTOP)
3335 {
3336 /* A thread that has been requested to stop by GDB with
3337 target_stop, and it stopped cleanly, so report as SIG0. The
3338 use of SIGSTOP is an implementation detail. */
a493e3e2 3339 ourstatus->value.sig = GDB_SIGNAL_0;
25289eb2
PA
3340 }
3341
1e225492
JK
3342 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3343 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3344 lp->core = -1;
3345 else
2e794194 3346 lp->core = linux_common_core_of_thread (lp->ptid);
1e225492 3347
f973ed9c 3348 return lp->ptid;
d6b0e80f
AC
3349}
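/* Illustrative sketch only (not compiled into GDB): how a raw
   waitpid status word, like the STATUS passed to store_waitstatus
   above, decomposes into the wait kinds this function reports.  Only
   the standard <sys/wait.h> macros are used; "decode_status" is a
   hypothetical name, not a GDB function.  */
#if 0
static void
decode_status (int status)
{
  if (WIFEXITED (status))
    /* Maps to TARGET_WAITKIND_EXITED.  */
    fprintf_unfiltered (gdb_stdlog, "exited, code %d\n",
			WEXITSTATUS (status));
  else if (WIFSIGNALED (status))
    /* Maps to TARGET_WAITKIND_SIGNALLED.  */
    fprintf_unfiltered (gdb_stdlog, "killed by signal %d\n",
			WTERMSIG (status));
  else if (WIFSTOPPED (status))
    /* Maps to TARGET_WAITKIND_STOPPED.  */
    fprintf_unfiltered (gdb_stdlog, "stopped by signal %d\n",
			WSTOPSIG (status));
}
#endif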
3350
e3e9f5a2
PA
3351/* Resume LWPs that are currently stopped without any pending status
3352 to report, but are resumed from the core's perspective. */
3353
3354static int
3355resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3356{
9a3c8263 3357 ptid_t *wait_ptid_p = (ptid_t *) data;
e3e9f5a2 3358
4dd63d48
PA
3359 if (!lp->stopped)
3360 {
3361 if (debug_linux_nat)
3362 fprintf_unfiltered (gdb_stdlog,
3363 "RSRL: NOT resuming LWP %s, not stopped\n",
3364 target_pid_to_str (lp->ptid));
3365 }
3366 else if (!lp->resumed)
3367 {
3368 if (debug_linux_nat)
3369 fprintf_unfiltered (gdb_stdlog,
3370 "RSRL: NOT resuming LWP %s, not resumed\n",
3371 target_pid_to_str (lp->ptid));
3372 }
3373 else if (lwp_status_pending_p (lp))
3374 {
3375 if (debug_linux_nat)
3376 fprintf_unfiltered (gdb_stdlog,
3377 "RSRL: NOT resuming LWP %s, has pending status\n",
3378 target_pid_to_str (lp->ptid));
3379 }
3380 else
e3e9f5a2 3381 {
336060f3
PA
3382 struct regcache *regcache = get_thread_regcache (lp->ptid);
3383 struct gdbarch *gdbarch = get_regcache_arch (regcache);
336060f3 3384
23f238d3 3385 TRY
e3e9f5a2 3386 {
23f238d3
PA
3387 CORE_ADDR pc = regcache_read_pc (regcache);
3388 int leave_stopped = 0;
e3e9f5a2 3389
23f238d3
PA
3390 /* Don't bother if there's a breakpoint at PC that we'd hit
3391 immediately, and we're not waiting for this LWP. */
3392 if (!ptid_match (lp->ptid, *wait_ptid_p))
3393 {
3394 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3395 leave_stopped = 1;
3396 }
e3e9f5a2 3397
23f238d3
PA
3398 if (!leave_stopped)
3399 {
3400 if (debug_linux_nat)
3401 fprintf_unfiltered (gdb_stdlog,
3402 "RSRL: resuming stopped-resumed LWP %s at "
3403 "%s: step=%d\n",
3404 target_pid_to_str (lp->ptid),
3405 paddress (gdbarch, pc),
3406 lp->step);
3407
3408 linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
3409 }
3410 }
3411 CATCH (ex, RETURN_MASK_ERROR)
3412 {
3413 if (!check_ptrace_stopped_lwp_gone (lp))
3414 throw_exception (ex);
3415 }
3416 END_CATCH
e3e9f5a2
PA
3417 }
3418
3419 return 0;
3420}
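/* Illustrative sketch only (not compiled into GDB): the
   iterate_over_lwps callback convention that
   resume_stopped_resumed_lwps and the other callbacks in this file
   follow.  Returning zero keeps the iteration going; returning
   non-zero stops it and makes iterate_over_lwps return that LWP, as
   the status_callback use above relies on.  "find_stepping_lwp" is a
   hypothetical example, not a GDB function.  */
#if 0
static int
find_stepping_lwp (struct lwp_info *lp, void *data)
{
  /* Non-zero stops the iteration and returns LP to the caller.  */
  return lp->step;
}

/* Typical use:
     struct lwp_info *lp
       = iterate_over_lwps (minus_one_ptid, find_stepping_lwp, NULL);  */
#endif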
3421
7feb7d06
PA
3422static ptid_t
3423linux_nat_wait (struct target_ops *ops,
47608cb1
PA
3424 ptid_t ptid, struct target_waitstatus *ourstatus,
3425 int target_options)
7feb7d06
PA
3426{
3427 ptid_t event_ptid;
3428
3429 if (debug_linux_nat)
09826ec5
PA
3430 {
3431 char *options_string;
3432
3433 options_string = target_options_to_string (target_options);
3434 fprintf_unfiltered (gdb_stdlog,
3435 "linux_nat_wait: [%s], [%s]\n",
3436 target_pid_to_str (ptid),
3437 options_string);
3438 xfree (options_string);
3439 }
7feb7d06
PA
3440
3441 /* Flush the async file first. */
d9d41e78 3442 if (target_is_async_p ())
7feb7d06
PA
3443 async_file_flush ();
3444
e3e9f5a2
PA
3445 /* Resume LWPs that are currently stopped without any pending status
3446 to report, but are resumed from the core's perspective. LWPs get
3447 in this state if we find them stopping at a time we're not
3448 interested in reporting the event (target_wait on a
3449 specific_process, for example, see linux_nat_wait_1), and
3450 meanwhile the event became uninteresting. Don't bother resuming
3451 LWPs we're not going to wait for if they'd stop immediately. */
fbea99ea 3452 if (target_is_non_stop_p ())
e3e9f5a2
PA
3453 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3454
47608cb1 3455 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
7feb7d06
PA
3456
3457 /* If we requested any event, and something came out, assume there
3458 may be more. If we requested a specific lwp or process, also
3459 assume there may be more. */
d9d41e78 3460 if (target_is_async_p ()
6953d224
PA
3461 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3462 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
7feb7d06
PA
3463 || !ptid_equal (ptid, minus_one_ptid)))
3464 async_file_mark ();
3465
7feb7d06
PA
3466 return event_ptid;
3467}
3468
d6b0e80f
AC
3469static int
3470kill_callback (struct lwp_info *lp, void *data)
3471{
ed731959
JK
3472 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3473
3474 errno = 0;
69ff6be5 3475 kill_lwp (ptid_get_lwp (lp->ptid), SIGKILL);
ed731959 3476 if (debug_linux_nat)
57745c90
PA
3477 {
3478 int save_errno = errno;
3479
3480 fprintf_unfiltered (gdb_stdlog,
3481 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
3482 target_pid_to_str (lp->ptid),
3483 save_errno ? safe_strerror (save_errno) : "OK");
3484 }
ed731959
JK
3485
3486 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3487
d6b0e80f 3488 errno = 0;
dfd4cc63 3489 ptrace (PTRACE_KILL, ptid_get_lwp (lp->ptid), 0, 0);
d6b0e80f 3490 if (debug_linux_nat)
57745c90
PA
3491 {
3492 int save_errno = errno;
3493
3494 fprintf_unfiltered (gdb_stdlog,
3495 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3496 target_pid_to_str (lp->ptid),
3497 save_errno ? safe_strerror (save_errno) : "OK");
3498 }
d6b0e80f
AC
3499
3500 return 0;
3501}
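/* Illustrative sketch only (not compiled into GDB): kill_lwp above
   directs the signal at a single thread instead of the whole thread
   group; on Linux that is done with the tkill/tgkill system calls,
   for which glibc provides no wrapper.  The sketch shows the tgkill
   variant; "tgkill_sketch" is a hypothetical name and the real
   kill_lwp may differ in detail.  */
#if 0
static int
tgkill_sketch (pid_t tgid, pid_t tid, int signo)
{
  /* syscall(2) is used because there is no libc wrapper.  */
  return (int) syscall (SYS_tgkill, tgid, tid, signo);
}
#endif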
3502
3503static int
3504kill_wait_callback (struct lwp_info *lp, void *data)
3505{
3506 pid_t pid;
3507
3508 /* We must make sure that there are no pending events (delayed
3509 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3510 program doesn't interfere with any following debugging session. */
3511
d6b0e80f
AC
3512 do
3513 {
4a6ed09b 3514 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, __WALL);
e85a822c 3515 if (pid != (pid_t) -1)
d6b0e80f 3516 {
e85a822c
DJ
3517 if (debug_linux_nat)
3518 fprintf_unfiltered (gdb_stdlog,
4a6ed09b 3519 "KWC: wait %s received unknown.\n",
e85a822c 3520 target_pid_to_str (lp->ptid));
4a6ed09b
PA
3521 /* The Linux kernel sometimes fails to kill a thread
3522 completely after PTRACE_KILL; that goes from the stop
3523 point in do_fork out to the one in get_signal_to_deliver
3524 and waits again. So kill it again. */
e85a822c 3525 kill_callback (lp, NULL);
d6b0e80f
AC
3526 }
3527 }
dfd4cc63 3528 while (pid == ptid_get_lwp (lp->ptid));
d6b0e80f
AC
3529
3530 gdb_assert (pid == -1 && errno == ECHILD);
3531 return 0;
3532}
3533
3534static void
7d85a9c0 3535linux_nat_kill (struct target_ops *ops)
d6b0e80f 3536{
f973ed9c
DJ
3537 struct target_waitstatus last;
3538 ptid_t last_ptid;
3539 int status;
d6b0e80f 3540
f973ed9c
DJ
3541 /* If we're stopped while forking and we haven't followed yet,
3542 kill the other task. We need to do this first because the
3543 parent will be sleeping if this is a vfork. */
d6b0e80f 3544
f973ed9c 3545 get_last_target_status (&last_ptid, &last);
d6b0e80f 3546
f973ed9c
DJ
3547 if (last.kind == TARGET_WAITKIND_FORKED
3548 || last.kind == TARGET_WAITKIND_VFORKED)
3549 {
dfd4cc63 3550 ptrace (PT_KILL, ptid_get_pid (last.value.related_pid), 0, 0);
f973ed9c 3551 wait (&status);
26cb8b7c
PA
3552
3553 /* Let the arch-specific native code know this process is
3554 gone. */
dfd4cc63 3555 linux_nat_forget_process (ptid_get_pid (last.value.related_pid));
f973ed9c
DJ
3556 }
3557
3558 if (forks_exist_p ())
7feb7d06 3559 linux_fork_killall ();
f973ed9c
DJ
3560 else
3561 {
d90e17a7 3562 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
e0881a8e 3563
4c28f408
PA
3564 /* Stop all threads before killing them, since ptrace requires
3565	 that the thread be stopped for PTRACE_KILL to succeed.  */
d90e17a7 3566 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
3567 /* ... and wait until all of them have reported back that
3568 they're no longer running. */
d90e17a7 3569 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 3570
f973ed9c 3571 /* Kill all LWP's ... */
d90e17a7 3572 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
3573
3574 /* ... and wait until we've flushed all events. */
d90e17a7 3575 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
3576 }
3577
3578 target_mourn_inferior ();
d6b0e80f
AC
3579}
3580
3581static void
136d6dae 3582linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 3583{
26cb8b7c
PA
3584 int pid = ptid_get_pid (inferior_ptid);
3585
3586 purge_lwp_list (pid);
d6b0e80f 3587
f973ed9c 3588 if (! forks_exist_p ())
d90e17a7
PA
3589 /* Normal case, no other forks available. */
3590 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
3591 else
3592 /* Multi-fork case. The current inferior_ptid has exited, but
3593 there are other viable forks to debug. Delete the exiting
3594 one and context-switch to the first available. */
3595 linux_fork_mourn_inferior ();
26cb8b7c
PA
3596
3597 /* Let the arch-specific native code know this process is gone. */
3598 linux_nat_forget_process (pid);
d6b0e80f
AC
3599}
3600
5b009018
PA
3601/* Convert a native/host siginfo object, into/from the siginfo in the
3602 layout of the inferiors' architecture. */
3603
3604static void
a5362b9a 3605siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5b009018
PA
3606{
3607 int done = 0;
3608
3609 if (linux_nat_siginfo_fixup != NULL)
3610 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3611
3612 /* If there was no callback, or the callback didn't do anything,
3613 then just do a straight memcpy. */
3614 if (!done)
3615 {
3616 if (direction == 1)
a5362b9a 3617 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5b009018 3618 else
a5362b9a 3619 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5b009018
PA
3620 }
3621}
3622
9b409511 3623static enum target_xfer_status
4aa995e1
PA
3624linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
3625 const char *annex, gdb_byte *readbuf,
9b409511
YQ
3626 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3627 ULONGEST *xfered_len)
4aa995e1 3628{
4aa995e1 3629 int pid;
a5362b9a
TS
3630 siginfo_t siginfo;
3631 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1
PA
3632
3633 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3634 gdb_assert (readbuf || writebuf);
3635
dfd4cc63 3636 pid = ptid_get_lwp (inferior_ptid);
4aa995e1 3637 if (pid == 0)
dfd4cc63 3638 pid = ptid_get_pid (inferior_ptid);
4aa995e1
PA
3639
3640 if (offset > sizeof (siginfo))
2ed4b548 3641 return TARGET_XFER_E_IO;
4aa995e1
PA
3642
3643 errno = 0;
3644 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3645 if (errno != 0)
2ed4b548 3646 return TARGET_XFER_E_IO;
4aa995e1 3647
5b009018
PA
3648 /* When GDB is built as a 64-bit application, ptrace writes into
3649 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3650 inferior with a 64-bit GDB should look the same as debugging it
3651 with a 32-bit GDB, we need to convert it. GDB core always sees
3652 the converted layout, so any read/write will have to be done
3653 post-conversion. */
3654 siginfo_fixup (&siginfo, inf_siginfo, 0);
3655
4aa995e1
PA
3656 if (offset + len > sizeof (siginfo))
3657 len = sizeof (siginfo) - offset;
3658
3659 if (readbuf != NULL)
5b009018 3660 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
3661 else
3662 {
5b009018
PA
3663 memcpy (inf_siginfo + offset, writebuf, len);
3664
3665 /* Convert back to ptrace layout before flushing it out. */
3666 siginfo_fixup (&siginfo, inf_siginfo, 1);
3667
4aa995e1
PA
3668 errno = 0;
3669 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3670 if (errno != 0)
2ed4b548 3671 return TARGET_XFER_E_IO;
4aa995e1
PA
3672 }
3673
9b409511
YQ
3674 *xfered_len = len;
3675 return TARGET_XFER_OK;
4aa995e1
PA
3676}
3677
9b409511 3678static enum target_xfer_status
10d6c8cd
DJ
3679linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3680 const char *annex, gdb_byte *readbuf,
3681 const gdb_byte *writebuf,
9b409511 3682 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
d6b0e80f 3683{
4aa995e1 3684 struct cleanup *old_chain;
9b409511 3685 enum target_xfer_status xfer;
d6b0e80f 3686
4aa995e1
PA
3687 if (object == TARGET_OBJECT_SIGNAL_INFO)
3688 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
9b409511 3689 offset, len, xfered_len);
4aa995e1 3690
c35b1492
PA
3691 /* The target is connected but no live inferior is selected. Pass
3692 this request down to a lower stratum (e.g., the executable
3693 file). */
3694 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
9b409511 3695 return TARGET_XFER_EOF;
c35b1492 3696
4aa995e1
PA
3697 old_chain = save_inferior_ptid ();
3698
dfd4cc63
LM
3699 if (ptid_lwp_p (inferior_ptid))
3700 inferior_ptid = pid_to_ptid (ptid_get_lwp (inferior_ptid));
d6b0e80f 3701
10d6c8cd 3702 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511 3703 offset, len, xfered_len);
d6b0e80f
AC
3704
3705 do_cleanups (old_chain);
3706 return xfer;
3707}
3708
28439f5e
PA
3709static int
3710linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
3711{
4a6ed09b
PA
3712	  /* As long as a PTID is in the LWP list, consider it alive.  */
3713 return find_lwp_pid (ptid) != NULL;
28439f5e
PA
3714}
3715
8a06aea7
PA
3716/* Implement the to_update_thread_list target method for this
3717 target. */
3718
3719static void
3720linux_nat_update_thread_list (struct target_ops *ops)
3721{
a6904d5a
PA
3722 struct lwp_info *lwp;
3723
4a6ed09b
PA
3724 /* We add/delete threads from the list as clone/exit events are
3725 processed, so just try deleting exited threads still in the
3726 thread list. */
3727 delete_exited_threads ();
a6904d5a
PA
3728
3729 /* Update the processor core that each lwp/thread was last seen
3730 running on. */
3731 ALL_LWPS (lwp)
3732 lwp->core = linux_common_core_of_thread (lwp->ptid);
8a06aea7
PA
3733}
3734
d6b0e80f 3735static char *
117de6a9 3736linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
3737{
3738 static char buf[64];
3739
dfd4cc63
LM
3740 if (ptid_lwp_p (ptid)
3741 && (ptid_get_pid (ptid) != ptid_get_lwp (ptid)
3742 || num_lwps (ptid_get_pid (ptid)) > 1))
d6b0e80f 3743 {
dfd4cc63 3744 snprintf (buf, sizeof (buf), "LWP %ld", ptid_get_lwp (ptid));
d6b0e80f
AC
3745 return buf;
3746 }
3747
3748 return normal_pid_to_str (ptid);
3749}
3750
73ede765 3751static const char *
503a628d 3752linux_nat_thread_name (struct target_ops *self, struct thread_info *thr)
4694da01 3753{
79efa585 3754 return linux_proc_tid_get_name (thr->ptid);
4694da01
TT
3755}
3756
dba24537
AC
3757/* Accepts an integer PID; Returns a string representing a file that
3758 can be opened to get the symbols for the child process. */
3759
6d8fd2b7 3760static char *
8dd27370 3761linux_child_pid_to_exec_file (struct target_ops *self, int pid)
dba24537 3762{
e0d86d2c 3763 return linux_proc_pid_to_exec_file (pid);
dba24537
AC
3764}
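/* Illustrative sketch only (not compiled into GDB): the mechanism
   behind linux_proc_pid_to_exec_file (which lives in
   nat/linux-procfs.c) is essentially a readlink of /proc/PID/exe.
   "exec_file_sketch" and its static buffer are hypothetical, not the
   real implementation.  */
#if 0
static char *
exec_file_sketch (int pid)
{
  static char target[PATH_MAX];
  char name[PATH_MAX];
  ssize_t len;

  xsnprintf (name, sizeof (name), "/proc/%d/exe", pid);
  len = readlink (name, target, sizeof (target) - 1);
  if (len < 0)
    return NULL;
  /* readlink does not NUL-terminate.  */
  target[len] = '\0';
  return target;
}
#endif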
3765
10d6c8cd
DJ
3766/* Implement the to_xfer_partial interface for memory reads using the /proc
3767 filesystem. Because we can use a single read() call for /proc, this
3768 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3769 but it doesn't support writes. */
3770
9b409511 3771static enum target_xfer_status
10d6c8cd
DJ
3772linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3773 const char *annex, gdb_byte *readbuf,
3774 const gdb_byte *writebuf,
9b409511 3775 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
dba24537 3776{
10d6c8cd
DJ
3777 LONGEST ret;
3778 int fd;
dba24537
AC
3779 char filename[64];
3780
10d6c8cd 3781 if (object != TARGET_OBJECT_MEMORY || !readbuf)
f486487f 3782 return TARGET_XFER_EOF;
dba24537
AC
3783
3784 /* Don't bother for one word. */
3785 if (len < 3 * sizeof (long))
9b409511 3786 return TARGET_XFER_EOF;
dba24537
AC
3787
3788 /* We could keep this file open and cache it - possibly one per
3789 thread. That requires some juggling, but is even faster. */
cde33bf1
YQ
3790 xsnprintf (filename, sizeof filename, "/proc/%d/mem",
3791 ptid_get_pid (inferior_ptid));
614c279d 3792 fd = gdb_open_cloexec (filename, O_RDONLY | O_LARGEFILE, 0);
dba24537 3793 if (fd == -1)
9b409511 3794 return TARGET_XFER_EOF;
dba24537
AC
3795
3796 /* If pread64 is available, use it. It's faster if the kernel
3797 supports it (only one syscall), and it's 64-bit safe even on
3798 32-bit platforms (for instance, SPARC debugging a SPARC64
3799 application). */
3800#ifdef HAVE_PREAD64
10d6c8cd 3801 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 3802#else
10d6c8cd 3803 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
3804#endif
3805 ret = 0;
3806 else
3807 ret = len;
3808
3809 close (fd);
9b409511
YQ
3810
3811 if (ret == 0)
3812 return TARGET_XFER_EOF;
3813 else
3814 {
3815 *xfered_len = ret;
3816 return TARGET_XFER_OK;
3817 }
dba24537
AC
3818}
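/* Illustrative sketch only (not compiled into GDB): the word-by-word
   PTRACE_PEEKTEXT loop that the single /proc/PID/mem read above
   avoids - one ptrace call per long versus one pread for the whole
   range.  "peek_words" is a hypothetical name.  */
#if 0
static int
peek_words (pid_t pid, CORE_ADDR addr, gdb_byte *buf, size_t len)
{
  size_t i;

  for (i = 0; i < len; i += sizeof (long))
    {
      size_t chunk = ((len - i) < sizeof (long) ? len - i : sizeof (long));
      long word;

      errno = 0;
      word = ptrace (PTRACE_PEEKTEXT, pid,
		     (PTRACE_TYPE_ARG3) (uintptr_t) (addr + i), 0);
      if (word == -1 && errno != 0)
	return -1;
      memcpy (buf + i, &word, chunk);
    }
  return 0;
}
#endif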
3819
efcbbd14
UW
3820
3821/* Enumerate spufs IDs for process PID. */
3822static LONGEST
b55e14c7 3823spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, ULONGEST len)
efcbbd14 3824{
f5656ead 3825 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
efcbbd14
UW
3826 LONGEST pos = 0;
3827 LONGEST written = 0;
3828 char path[128];
3829 DIR *dir;
3830 struct dirent *entry;
3831
3832 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
3833 dir = opendir (path);
3834 if (!dir)
3835 return -1;
3836
3837 rewinddir (dir);
3838 while ((entry = readdir (dir)) != NULL)
3839 {
3840 struct stat st;
3841 struct statfs stfs;
3842 int fd;
3843
3844 fd = atoi (entry->d_name);
3845 if (!fd)
3846 continue;
3847
3848 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
3849 if (stat (path, &st) != 0)
3850 continue;
3851 if (!S_ISDIR (st.st_mode))
3852 continue;
3853
3854 if (statfs (path, &stfs) != 0)
3855 continue;
3856 if (stfs.f_type != SPUFS_MAGIC)
3857 continue;
3858
3859 if (pos >= offset && pos + 4 <= offset + len)
3860 {
3861 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
3862 written += 4;
3863 }
3864 pos += 4;
3865 }
3866
3867 closedir (dir);
3868 return written;
3869}
3870
3871/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
3872 object type, using the /proc file system. */
9b409511
YQ
3873
3874static enum target_xfer_status
efcbbd14
UW
3875linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
3876 const char *annex, gdb_byte *readbuf,
3877 const gdb_byte *writebuf,
9b409511 3878 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
efcbbd14
UW
3879{
3880 char buf[128];
3881 int fd = 0;
3882 int ret = -1;
dfd4cc63 3883 int pid = ptid_get_pid (inferior_ptid);
efcbbd14
UW
3884
3885 if (!annex)
3886 {
3887 if (!readbuf)
2ed4b548 3888 return TARGET_XFER_E_IO;
efcbbd14 3889 else
9b409511
YQ
3890 {
3891 LONGEST l = spu_enumerate_spu_ids (pid, readbuf, offset, len);
3892
3893 if (l < 0)
3894 return TARGET_XFER_E_IO;
3895 else if (l == 0)
3896 return TARGET_XFER_EOF;
3897 else
3898 {
3899 *xfered_len = (ULONGEST) l;
3900 return TARGET_XFER_OK;
3901 }
3902 }
efcbbd14
UW
3903 }
3904
3905 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
614c279d 3906	  fd = gdb_open_cloexec (buf, writebuf ? O_WRONLY : O_RDONLY, 0);
efcbbd14 3907 if (fd <= 0)
2ed4b548 3908 return TARGET_XFER_E_IO;
efcbbd14
UW
3909
3910 if (offset != 0
3911 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
3912 {
3913 close (fd);
9b409511 3914 return TARGET_XFER_EOF;
efcbbd14
UW
3915 }
3916
3917 if (writebuf)
3918 ret = write (fd, writebuf, (size_t) len);
3919 else if (readbuf)
3920 ret = read (fd, readbuf, (size_t) len);
3921
3922 close (fd);
9b409511
YQ
3923
3924 if (ret < 0)
3925 return TARGET_XFER_E_IO;
3926 else if (ret == 0)
3927 return TARGET_XFER_EOF;
3928 else
3929 {
3930 *xfered_len = (ULONGEST) ret;
3931 return TARGET_XFER_OK;
3932 }
efcbbd14
UW
3933}
3934
3935
dba24537
AC
3936/* Parse LINE as a signal set and add its set bits to SIGS. */
3937
3938static void
3939add_line_to_sigset (const char *line, sigset_t *sigs)
3940{
3941 int len = strlen (line) - 1;
3942 const char *p;
3943 int signum;
3944
3945 if (line[len] != '\n')
8a3fe4f8 3946 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3947
3948 p = line;
3949 signum = len * 4;
3950 while (len-- > 0)
3951 {
3952 int digit;
3953
3954 if (*p >= '0' && *p <= '9')
3955 digit = *p - '0';
3956 else if (*p >= 'a' && *p <= 'f')
3957 digit = *p - 'a' + 10;
3958 else
8a3fe4f8 3959 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3960
3961 signum -= 4;
3962
3963 if (digit & 1)
3964 sigaddset (sigs, signum + 1);
3965 if (digit & 2)
3966 sigaddset (sigs, signum + 2);
3967 if (digit & 4)
3968 sigaddset (sigs, signum + 3);
3969 if (digit & 8)
3970 sigaddset (sigs, signum + 4);
3971
3972 p++;
3973 }
3974}
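/* Worked example (illustrative): a status line of
   "SigPnd:\t0000000000000102\n" hands "0000000000000102\n" to
   add_line_to_sigset.  That hex mask is 0x102; bit 1 stands for
   signal 2 (SIGINT) and bit 8 for signal 9 (SIGKILL), so exactly
   those two signals end up added to the set.  */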
3975
3976/* Find process PID's pending, blocked and ignored signals from
3977   /proc/pid/status and set PENDING, BLOCKED and IGNORED to match.  */
3978
3979void
3e43a32a
MS
3980linux_proc_pending_signals (int pid, sigset_t *pending,
3981 sigset_t *blocked, sigset_t *ignored)
dba24537
AC
3982{
3983 FILE *procfile;
d8d2a3ee 3984 char buffer[PATH_MAX], fname[PATH_MAX];
7c8a8b04 3985 struct cleanup *cleanup;
dba24537
AC
3986
3987 sigemptyset (pending);
3988 sigemptyset (blocked);
3989 sigemptyset (ignored);
cde33bf1 3990 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
614c279d 3991 procfile = gdb_fopen_cloexec (fname, "r");
dba24537 3992 if (procfile == NULL)
8a3fe4f8 3993 error (_("Could not open %s"), fname);
7c8a8b04 3994 cleanup = make_cleanup_fclose (procfile);
dba24537 3995
d8d2a3ee 3996 while (fgets (buffer, PATH_MAX, procfile) != NULL)
dba24537
AC
3997 {
3998 /* Normal queued signals are on the SigPnd line in the status
3999 file. However, 2.6 kernels also have a "shared" pending
4000 queue for delivering signals to a thread group, so check for
4001 a ShdPnd line also.
4002
4003 Unfortunately some Red Hat kernels include the shared pending
4004 queue but not the ShdPnd status field. */
4005
61012eef 4006 if (startswith (buffer, "SigPnd:\t"))
dba24537 4007 add_line_to_sigset (buffer + 8, pending);
61012eef 4008 else if (startswith (buffer, "ShdPnd:\t"))
dba24537 4009 add_line_to_sigset (buffer + 8, pending);
61012eef 4010 else if (startswith (buffer, "SigBlk:\t"))
dba24537 4011 add_line_to_sigset (buffer + 8, blocked);
61012eef 4012 else if (startswith (buffer, "SigIgn:\t"))
dba24537
AC
4013 add_line_to_sigset (buffer + 8, ignored);
4014 }
4015
7c8a8b04 4016 do_cleanups (cleanup);
dba24537
AC
4017}
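/* Illustrative sketch only (not compiled into GDB): typical use of
   linux_proc_pending_signals, querying one of the returned sets with
   the POSIX sigismember.  "report_pending_sigint" is a hypothetical
   name.  */
#if 0
static void
report_pending_sigint (int pid)
{
  sigset_t pending, blocked, ignored;

  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
  if (sigismember (&pending, SIGINT))
    printf_unfiltered ("SIGINT is pending for process %d\n", pid);
}
#endif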
4018
9b409511 4019static enum target_xfer_status
07e059b5 4020linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
e0881a8e 4021 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4022 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4023 ULONGEST *xfered_len)
07e059b5 4024{
07e059b5
VP
4025 gdb_assert (object == TARGET_OBJECT_OSDATA);
4026
9b409511
YQ
4027 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4028 if (*xfered_len == 0)
4029 return TARGET_XFER_EOF;
4030 else
4031 return TARGET_XFER_OK;
07e059b5
VP
4032}
4033
9b409511 4034static enum target_xfer_status
10d6c8cd
DJ
4035linux_xfer_partial (struct target_ops *ops, enum target_object object,
4036 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4037 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4038 ULONGEST *xfered_len)
10d6c8cd 4039{
9b409511 4040 enum target_xfer_status xfer;
10d6c8cd
DJ
4041
4042 if (object == TARGET_OBJECT_AUXV)
9f2982ff 4043 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
9b409511 4044 offset, len, xfered_len);
10d6c8cd 4045
07e059b5
VP
4046 if (object == TARGET_OBJECT_OSDATA)
4047 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
9b409511 4048 offset, len, xfered_len);
07e059b5 4049
efcbbd14
UW
4050 if (object == TARGET_OBJECT_SPU)
4051 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
9b409511 4052 offset, len, xfered_len);
efcbbd14 4053
8f313923
JK
4054	  /* GDB may calculate addresses in a wider type than the inferior's address
4055	     width; mask the address down to ADDR_BIT bits before its final use - either by
4056 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4057
4058 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4059
4060 if (object == TARGET_OBJECT_MEMORY)
4061 {
f5656ead 4062 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
8f313923
JK
4063
4064 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4065 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4066 }
4067
10d6c8cd 4068 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511
YQ
4069 offset, len, xfered_len);
4070 if (xfer != TARGET_XFER_EOF)
10d6c8cd
DJ
4071 return xfer;
4072
4073 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511 4074 offset, len, xfered_len);
10d6c8cd
DJ
4075}
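/* Worked example (illustrative) of the masking above: with
   ADDR_BIT == 32, the mask is ((ULONGEST) 1 << 32) - 1 == 0xffffffff,
   so a sign-extended address such as 0xffffffff80001234 becomes
   0x80001234.  The ADDR_BIT comparison guards against shifting by
   the full width of ULONGEST, which would be undefined.  */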
4076
5808517f
YQ
4077static void
4078cleanup_target_stop (void *arg)
4079{
4080 ptid_t *ptid = (ptid_t *) arg;
4081
4082 gdb_assert (arg != NULL);
4083
4084 /* Unpause all */
a493e3e2 4085 target_resume (*ptid, 0, GDB_SIGNAL_0);
5808517f
YQ
4086}
4087
4088static VEC(static_tracepoint_marker_p) *
c686c57f
TT
4089linux_child_static_tracepoint_markers_by_strid (struct target_ops *self,
4090 const char *strid)
5808517f
YQ
4091{
4092 char s[IPA_CMD_BUF_SIZE];
4093 struct cleanup *old_chain;
4094 int pid = ptid_get_pid (inferior_ptid);
4095 VEC(static_tracepoint_marker_p) *markers = NULL;
4096 struct static_tracepoint_marker *marker = NULL;
4097 char *p = s;
4098 ptid_t ptid = ptid_build (pid, 0, 0);
4099
4100 /* Pause all */
4101 target_stop (ptid);
4102
4103 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4104 s[sizeof ("qTfSTM")] = 0;
4105
42476b70 4106 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4107
4108 old_chain = make_cleanup (free_current_marker, &marker);
4109 make_cleanup (cleanup_target_stop, &ptid);
4110
4111 while (*p++ == 'm')
4112 {
4113 if (marker == NULL)
4114 marker = XCNEW (struct static_tracepoint_marker);
4115
4116 do
4117 {
4118 parse_static_tracepoint_marker_definition (p, &p, marker);
4119
4120 if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4121 {
4122 VEC_safe_push (static_tracepoint_marker_p,
4123 markers, marker);
4124 marker = NULL;
4125 }
4126 else
4127 {
4128 release_static_tracepoint_marker (marker);
4129 memset (marker, 0, sizeof (*marker));
4130 }
4131 }
4132 while (*p++ == ','); /* comma-separated list */
4133
4134 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4135 s[sizeof ("qTsSTM")] = 0;
42476b70 4136 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4137 p = s;
4138 }
4139
4140 do_cleanups (old_chain);
4141
4142 return markers;
4143}
4144
e9efe249 4145/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
4146 it with local methods. */
4147
910122bf
UW
4148static void
4149linux_target_install_ops (struct target_ops *t)
10d6c8cd 4150{
6d8fd2b7 4151 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
eb73ad13 4152 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
6d8fd2b7 4153 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
eb73ad13 4154 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
6d8fd2b7 4155 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
eb73ad13 4156 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
a96d9b2e 4157 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
6d8fd2b7 4158 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 4159 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
4160 t->to_post_attach = linux_child_post_attach;
4161 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
4162
4163 super_xfer_partial = t->to_xfer_partial;
4164 t->to_xfer_partial = linux_xfer_partial;
5808517f
YQ
4165
4166 t->to_static_tracepoint_markers_by_strid
4167 = linux_child_static_tracepoint_markers_by_strid;
910122bf
UW
4168}
4169
4170struct target_ops *
4171linux_target (void)
4172{
4173 struct target_ops *t;
4174
4175 t = inf_ptrace_target ();
4176 linux_target_install_ops (t);
4177
4178 return t;
4179}
4180
4181struct target_ops *
7714d83a 4182linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
4183{
4184 struct target_ops *t;
4185
4186 t = inf_ptrace_trad_target (register_u_offset);
4187 linux_target_install_ops (t);
10d6c8cd 4188
10d6c8cd
DJ
4189 return t;
4190}
4191
b84876c2
PA
4192/* target_is_async_p implementation. */
4193
4194static int
6a109b6b 4195linux_nat_is_async_p (struct target_ops *ops)
b84876c2 4196{
198297aa 4197 return linux_is_async_p ();
b84876c2
PA
4198}
4199
4200/* target_can_async_p implementation. */
4201
4202static int
6a109b6b 4203linux_nat_can_async_p (struct target_ops *ops)
b84876c2
PA
4204{
4205 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 4206 it explicitly with the "set target-async" command.
b84876c2 4207 Someday, linux will always be async. */
3dd5b83d 4208 return target_async_permitted;
b84876c2
PA
4209}
4210
9908b566 4211static int
2a9a2795 4212linux_nat_supports_non_stop (struct target_ops *self)
9908b566
VP
4213{
4214 return 1;
4215}
4216
fbea99ea
PA
4217/* to_always_non_stop_p implementation. */
4218
4219static int
4220linux_nat_always_non_stop_p (struct target_ops *self)
4221{
f12899e9 4222 return 1;
fbea99ea
PA
4223}
4224
d90e17a7
PA
4225/* True if we want to support multi-process. To be removed when GDB
4226 supports multi-exec. */
4227
2277426b 4228int linux_multi_process = 1;
d90e17a7
PA
4229
4230static int
86ce2668 4231linux_nat_supports_multi_process (struct target_ops *self)
d90e17a7
PA
4232{
4233 return linux_multi_process;
4234}
4235
03583c20 4236static int
2bfc0540 4237linux_nat_supports_disable_randomization (struct target_ops *self)
03583c20
UW
4238{
4239#ifdef HAVE_PERSONALITY
4240 return 1;
4241#else
4242 return 0;
4243#endif
4244}
4245
b84876c2
PA
4246static int async_terminal_is_ours = 1;
4247
4d4ca2a1
DE
4248/* target_terminal_inferior implementation.
4249
4250 This is a wrapper around child_terminal_inferior to add async support. */
b84876c2
PA
4251
4252static void
d2f640d4 4253linux_nat_terminal_inferior (struct target_ops *self)
b84876c2 4254{
d6b64346 4255 child_terminal_inferior (self);
b84876c2 4256
d9d2d8b6 4257 /* Calls to target_terminal_*() are meant to be idempotent. */
b84876c2
PA
4258 if (!async_terminal_is_ours)
4259 return;
4260
4261 delete_file_handler (input_fd);
4262 async_terminal_is_ours = 0;
4263 set_sigint_trap ();
4264}
4265
4d4ca2a1
DE
4266/* target_terminal_ours implementation.
4267
4268 This is a wrapper around child_terminal_ours to add async support (and
4269 implement the target_terminal_ours vs target_terminal_ours_for_output
4270 distinction). child_terminal_ours is currently no different than
4271 child_terminal_ours_for_output.
4272 We leave target_terminal_ours_for_output alone, leaving it to
4273 child_terminal_ours_for_output. */
b84876c2 4274
2c0b251b 4275static void
e3594fd1 4276linux_nat_terminal_ours (struct target_ops *self)
b84876c2 4277{
b84876c2
PA
4278 /* GDB should never give the terminal to the inferior if the
4279 inferior is running in the background (run&, continue&, etc.),
4280 but claiming it sure should. */
d6b64346 4281 child_terminal_ours (self);
b84876c2 4282
b84876c2
PA
4283 if (async_terminal_is_ours)
4284 return;
4285
4286 clear_sigint_trap ();
4287 add_file_handler (input_fd, stdin_event_handler, 0);
4288 async_terminal_is_ours = 1;
4289}
4290
7feb7d06
PA
4291/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4292 so we notice when any child changes state, and notify the
4293 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4294 above to wait for the arrival of a SIGCHLD. */
4295
b84876c2 4296static void
7feb7d06 4297sigchld_handler (int signo)
b84876c2 4298{
7feb7d06
PA
4299 int old_errno = errno;
4300
01124a23
DE
4301 if (debug_linux_nat)
4302 ui_file_write_async_safe (gdb_stdlog,
4303 "sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06
PA
4304
4305 if (signo == SIGCHLD
4306 && linux_nat_event_pipe[0] != -1)
4307 async_file_mark (); /* Let the event loop know that there are
4308 events to handle. */
4309
4310 errno = old_errno;
4311}
4312
4313/* Callback registered with the target events file descriptor. */
4314
4315static void
4316handle_target_event (int error, gdb_client_data client_data)
4317{
6a3753b3 4318 inferior_event_handler (INF_REG_EVENT, NULL);
7feb7d06
PA
4319}
4320
4321/* Create/destroy the target events pipe. Returns previous state. */
4322
4323static int
4324linux_async_pipe (int enable)
4325{
198297aa 4326 int previous = linux_is_async_p ();
7feb7d06
PA
4327
4328 if (previous != enable)
4329 {
4330 sigset_t prev_mask;
4331
12696c10
PA
4332 /* Block child signals while we create/destroy the pipe, as
4333 their handler writes to it. */
7feb7d06
PA
4334 block_child_signals (&prev_mask);
4335
4336 if (enable)
4337 {
614c279d 4338 if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
7feb7d06
PA
4339 internal_error (__FILE__, __LINE__,
4340 "creating event pipe failed.");
4341
4342 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4343 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4344 }
4345 else
4346 {
4347 close (linux_nat_event_pipe[0]);
4348 close (linux_nat_event_pipe[1]);
4349 linux_nat_event_pipe[0] = -1;
4350 linux_nat_event_pipe[1] = -1;
4351 }
4352
4353 restore_child_signals_mask (&prev_mask);
4354 }
4355
4356 return previous;
b84876c2
PA
4357}
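/* Illustrative sketch only (not compiled into GDB): the "self-pipe
   trick" that linux_async_pipe, sigchld_handler and
   handle_target_event implement together.  The signal handler only
   writes a byte to a non-blocking pipe; the event loop watches the
   read end and drains it.  Names prefixed "sketch_" are
   hypothetical.  */
#if 0
static int sketch_pipe[2];

static void
sketch_sigchld (int signo)
{
  int old_errno = errno;
  char c = '+';

  /* write(2) is async-signal-safe; this merely wakes up the loop.  */
  (void) write (sketch_pipe[1], &c, 1);
  errno = old_errno;
}

static void
sketch_drain (void)
{
  char buf[64];

  /* O_NONBLOCK on the read end means this never hangs.  */
  while (read (sketch_pipe[0], buf, sizeof (buf)) > 0)
    ;
}
#endif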
4358
4359/* target_async implementation. */
4360
4361static void
6a3753b3 4362linux_nat_async (struct target_ops *ops, int enable)
b84876c2 4363{
6a3753b3 4364 if (enable)
b84876c2 4365 {
7feb7d06
PA
4366 if (!linux_async_pipe (1))
4367 {
4368 add_file_handler (linux_nat_event_pipe[0],
4369 handle_target_event, NULL);
4370 /* There may be pending events to handle. Tell the event loop
4371 to poll them. */
4372 async_file_mark ();
4373 }
b84876c2
PA
4374 }
4375 else
4376 {
b84876c2 4377 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 4378 linux_async_pipe (0);
b84876c2
PA
4379 }
4380 return;
4381}
4382
a493e3e2 4383/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
252fbfc8
PA
4384 event came out. */
4385
4c28f408 4386static int
252fbfc8 4387linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4c28f408 4388{
d90e17a7 4389 if (!lwp->stopped)
252fbfc8 4390 {
d90e17a7
PA
4391 if (debug_linux_nat)
4392 fprintf_unfiltered (gdb_stdlog,
4393 "LNSL: running -> suspending %s\n",
4394 target_pid_to_str (lwp->ptid));
252fbfc8 4395
252fbfc8 4396
25289eb2
PA
4397 if (lwp->last_resume_kind == resume_stop)
4398 {
4399 if (debug_linux_nat)
4400 fprintf_unfiltered (gdb_stdlog,
4401 "linux-nat: already stopping LWP %ld at "
4402 "GDB's request\n",
4403 ptid_get_lwp (lwp->ptid));
4404 return 0;
4405 }
252fbfc8 4406
25289eb2
PA
4407 stop_callback (lwp, NULL);
4408 lwp->last_resume_kind = resume_stop;
d90e17a7
PA
4409 }
4410 else
4411 {
4412 /* Already known to be stopped; do nothing. */
252fbfc8 4413
d90e17a7
PA
4414 if (debug_linux_nat)
4415 {
e09875d4 4416 if (find_thread_ptid (lwp->ptid)->stop_requested)
3e43a32a
MS
4417 fprintf_unfiltered (gdb_stdlog,
4418 "LNSL: already stopped/stop_requested %s\n",
d90e17a7
PA
4419 target_pid_to_str (lwp->ptid));
4420 else
3e43a32a
MS
4421 fprintf_unfiltered (gdb_stdlog,
4422 "LNSL: already stopped/no "
4423 "stop_requested yet %s\n",
d90e17a7 4424 target_pid_to_str (lwp->ptid));
252fbfc8
PA
4425 }
4426 }
4c28f408
PA
4427 return 0;
4428}
4429
4430static void
1eab8a48 4431linux_nat_stop (struct target_ops *self, ptid_t ptid)
4c28f408 4432{
bfedc46a
PA
4433 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4434}
4435
4436static void
4437linux_nat_interrupt (struct target_ops *self, ptid_t ptid)
4438{
4439 if (non_stop)
d90e17a7 4440 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4c28f408 4441 else
bfedc46a 4442 linux_ops->to_interrupt (linux_ops, ptid);
4c28f408
PA
4443}
4444
d90e17a7 4445static void
de90e03d 4446linux_nat_close (struct target_ops *self)
d90e17a7
PA
4447{
4448 /* Unregister from the event loop. */
9debeba0 4449 if (linux_nat_is_async_p (self))
6a3753b3 4450 linux_nat_async (self, 0);
d90e17a7 4451
d90e17a7 4452 if (linux_ops->to_close)
de90e03d 4453 linux_ops->to_close (linux_ops);
6a3cb8e8
PA
4454
4455 super_close (self);
d90e17a7
PA
4456}
4457
c0694254
PA
4458/* When requests are passed down from the linux-nat layer to the
4459 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4460 used. The address space pointer is stored in the inferior object,
4461 but the common code that is passed such ptid can't tell whether
4462 lwpid is a "main" process id or not (it assumes so). We reverse
4463 look up the "main" process id from the lwp here. */
4464
70221824 4465static struct address_space *
c0694254
PA
4466linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
4467{
4468 struct lwp_info *lwp;
4469 struct inferior *inf;
4470 int pid;
4471
dfd4cc63 4472 if (ptid_get_lwp (ptid) == 0)
c0694254
PA
4473 {
4474 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4475 tgid. */
4476 lwp = find_lwp_pid (ptid);
dfd4cc63 4477 pid = ptid_get_pid (lwp->ptid);
c0694254
PA
4478 }
4479 else
4480 {
4481 /* A (pid,lwpid,0) ptid. */
dfd4cc63 4482 pid = ptid_get_pid (ptid);
c0694254
PA
4483 }
4484
4485 inf = find_inferior_pid (pid);
4486 gdb_assert (inf != NULL);
4487 return inf->aspace;
4488}
4489
dc146f7c
VP
4490/* Return the cached value of the processor core for thread PTID. */
4491
70221824 4492static int
dc146f7c
VP
4493linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
4494{
4495 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 4496
dc146f7c
VP
4497 if (info)
4498 return info->core;
4499 return -1;
4500}
4501
7a6a1731
GB
4502/* Implementation of to_filesystem_is_local. */
4503
4504static int
4505linux_nat_filesystem_is_local (struct target_ops *ops)
4506{
4507 struct inferior *inf = current_inferior ();
4508
4509 if (inf->fake_pid_p || inf->pid == 0)
4510 return 1;
4511
4512 return linux_ns_same (inf->pid, LINUX_NS_MNT);
4513}
4514
4515/* Convert the INF argument passed to a to_fileio_* method
4516 to a process ID suitable for passing to its corresponding
4517 linux_mntns_* function. If INF is non-NULL then the
4518 caller is requesting the filesystem seen by INF. If INF
4519 is NULL then the caller is requesting the filesystem seen
4520   by GDB itself.  We fall back to GDB's filesystem in the case
4521 that INF is non-NULL but its PID is unknown. */
4522
4523static pid_t
4524linux_nat_fileio_pid_of (struct inferior *inf)
4525{
4526 if (inf == NULL || inf->fake_pid_p || inf->pid == 0)
4527 return getpid ();
4528 else
4529 return inf->pid;
4530}
4531
4532/* Implementation of to_fileio_open. */
4533
4534static int
4535linux_nat_fileio_open (struct target_ops *self,
4536 struct inferior *inf, const char *filename,
4313b8c0
GB
4537 int flags, int mode, int warn_if_slow,
4538 int *target_errno)
7a6a1731
GB
4539{
4540 int nat_flags;
4541 mode_t nat_mode;
4542 int fd;
4543
4544 if (fileio_to_host_openflags (flags, &nat_flags) == -1
4545 || fileio_to_host_mode (mode, &nat_mode) == -1)
4546 {
4547 *target_errno = FILEIO_EINVAL;
4548 return -1;
4549 }
4550
4551 fd = linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf),
4552 filename, nat_flags, nat_mode);
4553 if (fd == -1)
4554 *target_errno = host_to_fileio_error (errno);
4555
4556 return fd;
4557}
4558
4559/* Implementation of to_fileio_readlink. */
4560
4561static char *
4562linux_nat_fileio_readlink (struct target_ops *self,
4563 struct inferior *inf, const char *filename,
4564 int *target_errno)
4565{
4566 char buf[PATH_MAX];
4567 int len;
4568 char *ret;
4569
4570 len = linux_mntns_readlink (linux_nat_fileio_pid_of (inf),
4571 filename, buf, sizeof (buf));
4572 if (len < 0)
4573 {
4574 *target_errno = host_to_fileio_error (errno);
4575 return NULL;
4576 }
4577
224c3ddb 4578 ret = (char *) xmalloc (len + 1);
7a6a1731
GB
4579 memcpy (ret, buf, len);
4580 ret[len] = '\0';
4581 return ret;
4582}
4583
4584/* Implementation of to_fileio_unlink. */
4585
4586static int
4587linux_nat_fileio_unlink (struct target_ops *self,
4588 struct inferior *inf, const char *filename,
4589 int *target_errno)
4590{
4591 int ret;
4592
4593 ret = linux_mntns_unlink (linux_nat_fileio_pid_of (inf),
4594 filename);
4595 if (ret == -1)
4596 *target_errno = host_to_fileio_error (errno);
4597
4598 return ret;
4599}
4600
f973ed9c
DJ
4601void
4602linux_nat_add_target (struct target_ops *t)
4603{
f973ed9c
DJ
4604 /* Save the provided single-threaded target. We save this in a separate
4605 variable because another target we've inherited from (e.g. inf-ptrace)
4606 may have saved a pointer to T; we want to use it for the final
4607 process stratum target. */
4608 linux_ops_saved = *t;
4609 linux_ops = &linux_ops_saved;
4610
4611 /* Override some methods for multithreading. */
b84876c2 4612 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
4613 t->to_attach = linux_nat_attach;
4614 t->to_detach = linux_nat_detach;
4615 t->to_resume = linux_nat_resume;
4616 t->to_wait = linux_nat_wait;
2455069d 4617 t->to_pass_signals = linux_nat_pass_signals;
f973ed9c
DJ
4618 t->to_xfer_partial = linux_nat_xfer_partial;
4619 t->to_kill = linux_nat_kill;
4620 t->to_mourn_inferior = linux_nat_mourn_inferior;
4621 t->to_thread_alive = linux_nat_thread_alive;
8a06aea7 4622 t->to_update_thread_list = linux_nat_update_thread_list;
f973ed9c 4623 t->to_pid_to_str = linux_nat_pid_to_str;
4694da01 4624 t->to_thread_name = linux_nat_thread_name;
f973ed9c 4625 t->to_has_thread_control = tc_schedlock;
c0694254 4626 t->to_thread_address_space = linux_nat_thread_address_space;
ebec9a0f
PA
4627 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
4628 t->to_stopped_data_address = linux_nat_stopped_data_address;
faf09f01
PA
4629 t->to_stopped_by_sw_breakpoint = linux_nat_stopped_by_sw_breakpoint;
4630 t->to_supports_stopped_by_sw_breakpoint = linux_nat_supports_stopped_by_sw_breakpoint;
4631 t->to_stopped_by_hw_breakpoint = linux_nat_stopped_by_hw_breakpoint;
4632 t->to_supports_stopped_by_hw_breakpoint = linux_nat_supports_stopped_by_hw_breakpoint;
f973ed9c 4633
b84876c2
PA
4634 t->to_can_async_p = linux_nat_can_async_p;
4635 t->to_is_async_p = linux_nat_is_async_p;
9908b566 4636 t->to_supports_non_stop = linux_nat_supports_non_stop;
fbea99ea 4637 t->to_always_non_stop_p = linux_nat_always_non_stop_p;
b84876c2 4638 t->to_async = linux_nat_async;
b84876c2
PA
4639 t->to_terminal_inferior = linux_nat_terminal_inferior;
4640 t->to_terminal_ours = linux_nat_terminal_ours;
6a3cb8e8
PA
4641
4642 super_close = t->to_close;
d90e17a7 4643 t->to_close = linux_nat_close;
b84876c2 4644
4c28f408 4645 t->to_stop = linux_nat_stop;
bfedc46a 4646 t->to_interrupt = linux_nat_interrupt;
4c28f408 4647
d90e17a7
PA
4648 t->to_supports_multi_process = linux_nat_supports_multi_process;
4649
03583c20
UW
4650 t->to_supports_disable_randomization
4651 = linux_nat_supports_disable_randomization;
4652
dc146f7c
VP
4653 t->to_core_of_thread = linux_nat_core_of_thread;
4654
7a6a1731
GB
4655 t->to_filesystem_is_local = linux_nat_filesystem_is_local;
4656 t->to_fileio_open = linux_nat_fileio_open;
4657 t->to_fileio_readlink = linux_nat_fileio_readlink;
4658 t->to_fileio_unlink = linux_nat_fileio_unlink;
4659
f973ed9c
DJ
4660 /* We don't change the stratum; this target will sit at
4661     process_stratum and thread_db will sit at thread_stratum.  This
4662 is a little strange, since this is a multi-threaded-capable
4663 target, but we want to be on the stack below thread_db, and we
4664 also want to be used for single-threaded processes. */
4665
4666 add_target (t);
f973ed9c
DJ
4667}
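/* Illustrative sketch only (not compiled into GDB): the registration
   pattern an architecture-specific native file typically follows when
   using linux_nat_add_target.  The arch_* names are hypothetical
   stand-ins for that file's own methods.  */
#if 0
void
_initialize_arch_linux_nat_sketch (void)
{
  struct target_ops *t = linux_target ();

  /* Install arch-specific register access before registering.  */
  t->to_fetch_registers = arch_fetch_registers;
  t->to_store_registers = arch_store_registers;

  /* Register the target and hook per-thread setup.  */
  linux_nat_add_target (t);
  linux_nat_set_new_thread (t, arch_new_thread);
}
#endif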
4668
9f0bdab8
DJ
4669/* Register a method to call whenever a new thread is attached. */
4670void
7b50312a
PA
4671linux_nat_set_new_thread (struct target_ops *t,
4672 void (*new_thread) (struct lwp_info *))
9f0bdab8
DJ
4673{
4674 /* Save the pointer. We only support a single registered instance
4675 of the GNU/Linux native target, so we do not need to map this to
4676 T. */
4677 linux_nat_new_thread = new_thread;
4678}
4679
26cb8b7c
PA
4680/* See declaration in linux-nat.h. */
4681
4682void
4683linux_nat_set_new_fork (struct target_ops *t,
4684 linux_nat_new_fork_ftype *new_fork)
4685{
4686 /* Save the pointer. */
4687 linux_nat_new_fork = new_fork;
4688}
4689
4690/* See declaration in linux-nat.h. */
4691
4692void
4693linux_nat_set_forget_process (struct target_ops *t,
4694 linux_nat_forget_process_ftype *fn)
4695{
4696 /* Save the pointer. */
4697 linux_nat_forget_process_hook = fn;
4698}
4699
4700/* See declaration in linux-nat.h. */
4701
4702void
4703linux_nat_forget_process (pid_t pid)
4704{
4705 if (linux_nat_forget_process_hook != NULL)
4706 linux_nat_forget_process_hook (pid);
4707}
4708
5b009018
PA
4709/* Register a method that converts a siginfo object between the layout
4710 that ptrace returns, and the layout in the architecture of the
4711 inferior. */
4712void
4713linux_nat_set_siginfo_fixup (struct target_ops *t,
a5362b9a 4714 int (*siginfo_fixup) (siginfo_t *,
5b009018
PA
4715 gdb_byte *,
4716 int))
4717{
4718 /* Save the pointer. */
4719 linux_nat_siginfo_fixup = siginfo_fixup;
4720}
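/* Illustrative sketch only (not compiled into GDB): the shape of a
   siginfo fixup callback.  Per the calls in linux_xfer_siginfo,
   DIRECTION 0 converts the native object into the inferior's layout
   and DIRECTION 1 converts back; returning zero asks for the default
   straight memcpy.  The arch_* names are hypothetical.  */
#if 0
static int
arch_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  if (!arch_inferior_uses_compat_layout ())
    return 0;			/* Layouts match; memcpy is fine.  */

  if (direction == 0)
    arch_compat_siginfo_from_native (inf, native);
  else
    arch_native_siginfo_from_compat (native, inf);

  return 1;
}
#endif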
4721
7b50312a
PA
4722/* Register a method to call prior to resuming a thread. */
4723
4724void
4725linux_nat_set_prepare_to_resume (struct target_ops *t,
4726 void (*prepare_to_resume) (struct lwp_info *))
4727{
4728 /* Save the pointer. */
4729 linux_nat_prepare_to_resume = prepare_to_resume;
4730}
4731
f865ee35
JK
4732/* See linux-nat.h. */
4733
4734int
4735linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
9f0bdab8 4736{
da559b09 4737 int pid;
9f0bdab8 4738
dfd4cc63 4739 pid = ptid_get_lwp (ptid);
da559b09 4740 if (pid == 0)
dfd4cc63 4741 pid = ptid_get_pid (ptid);
f865ee35 4742
da559b09
JK
4743 errno = 0;
4744 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
4745 if (errno != 0)
4746 {
4747 memset (siginfo, 0, sizeof (*siginfo));
4748 return 0;
4749 }
f865ee35 4750 return 1;
9f0bdab8
DJ
4751}
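/* Illustrative sketch only (not compiled into GDB): typical use of
   linux_nat_get_siginfo, e.g. inspecting the faulting address of the
   current LWP.  si_signo and si_addr are standard siginfo_t fields;
   "show_fault_address" is a hypothetical name.  */
#if 0
static void
show_fault_address (ptid_t ptid)
{
  siginfo_t info;

  if (linux_nat_get_siginfo (ptid, &info)
      && info.si_signo == SIGSEGV)
    printf_unfiltered ("SIGSEGV at %p\n", info.si_addr);
}
#endif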
4752
7b669087
GB
4753/* See nat/linux-nat.h. */
4754
4755ptid_t
4756current_lwp_ptid (void)
4757{
4758 gdb_assert (ptid_lwp_p (inferior_ptid));
4759 return inferior_ptid;
4760}
4761
2c0b251b
PA
4762/* Provide a prototype to silence -Wmissing-prototypes. */
4763extern initialize_file_ftype _initialize_linux_nat;
4764
d6b0e80f
AC
4765void
4766_initialize_linux_nat (void)
4767{
ccce17b0
YQ
4768 add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
4769 &debug_linux_nat, _("\
b84876c2
PA
4770Set debugging of GNU/Linux lwp module."), _("\
4771Show debugging of GNU/Linux lwp module."), _("\
4772Enables printf debugging output."),
ccce17b0
YQ
4773 NULL,
4774 show_debug_linux_nat,
4775 &setdebuglist, &showdebuglist);
b84876c2 4776
7a6a1731
GB
4777 add_setshow_boolean_cmd ("linux-namespaces", class_maintenance,
4778 &debug_linux_namespaces, _("\
4779Set debugging of GNU/Linux namespaces module."), _("\
4780Show debugging of GNU/Linux namespaces module."), _("\
4781Enables printf debugging output."),
4782 NULL,
4783 NULL,
4784 &setdebuglist, &showdebuglist);
4785
b84876c2 4786 /* Save this mask as the default. */
d6b0e80f
AC
4787 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4788
7feb7d06
PA
4789 /* Install a SIGCHLD handler. */
4790 sigchld_action.sa_handler = sigchld_handler;
4791 sigemptyset (&sigchld_action.sa_mask);
4792 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
4793
4794 /* Make it the default. */
7feb7d06 4795 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
4796
4797 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4798 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
4799 sigdelset (&suspend_mask, SIGCHLD);
4800
7feb7d06 4801 sigemptyset (&blocked_mask);
d6b0e80f
AC
4802}
4803\f
4804
4805/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4806 the GNU/Linux Threads library and therefore doesn't really belong
4807 here. */
4808
d6b0e80f
AC
4809/* Return the set of signals used by the threads library in *SET. */
4810
4811void
4812lin_thread_get_thread_signals (sigset_t *set)
4813{
d6b0e80f
AC
4814 sigemptyset (set);
4815
4a6ed09b
PA
4816 /* NPTL reserves the first two RT signals, but does not provide any
4817 way for the debugger to query the signal numbers - fortunately
4818 they don't change. */
4819 sigaddset (set, __SIGRTMIN);
4820 sigaddset (set, __SIGRTMIN + 1);
d6b0e80f 4821}