]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/linux-nat.c
Stupid git!
[thirdparty/binutils-gdb.git] / gdb / linux-nat.c
CommitLineData
3993f6b1 1/* GNU/Linux native-dependent code common to multiple platforms.
dba24537 2
32d0add0 3 Copyright (C) 2001-2015 Free Software Foundation, Inc.
3993f6b1
DJ
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
a9762ec7 9 the Free Software Foundation; either version 3 of the License, or
3993f6b1
DJ
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
a9762ec7 18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
3993f6b1
DJ
19
20#include "defs.h"
21#include "inferior.h"
45741a9c 22#include "infrun.h"
3993f6b1 23#include "target.h"
96d7229d
LM
24#include "nat/linux-nat.h"
25#include "nat/linux-waitpid.h"
3993f6b1 26#include "gdb_wait.h"
d6b0e80f
AC
27#ifdef HAVE_TKILL_SYSCALL
28#include <unistd.h>
29#include <sys/syscall.h>
30#endif
3993f6b1 31#include <sys/ptrace.h>
0274a8ce 32#include "linux-nat.h"
125f8a3d
GB
33#include "nat/linux-ptrace.h"
34#include "nat/linux-procfs.h"
8cc73a39 35#include "nat/linux-personality.h"
ac264b3b 36#include "linux-fork.h"
d6b0e80f
AC
37#include "gdbthread.h"
38#include "gdbcmd.h"
39#include "regcache.h"
4f844a66 40#include "regset.h"
dab06dbe 41#include "inf-child.h"
10d6c8cd
DJ
42#include "inf-ptrace.h"
43#include "auxv.h"
1777feb0 44#include <sys/procfs.h> /* for elf_gregset etc. */
dba24537
AC
45#include "elf-bfd.h" /* for elfcore_write_* */
46#include "gregset.h" /* for gregset */
47#include "gdbcore.h" /* for get_exec_file */
48#include <ctype.h> /* for isdigit */
53ce3c39 49#include <sys/stat.h> /* for struct stat */
dba24537 50#include <fcntl.h> /* for O_RDONLY */
b84876c2
PA
51#include "inf-loop.h"
52#include "event-loop.h"
53#include "event-top.h"
07e059b5
VP
54#include <pwd.h>
55#include <sys/types.h>
2978b111 56#include <dirent.h>
07e059b5 57#include "xml-support.h"
efcbbd14 58#include <sys/vfs.h>
6c95b8df 59#include "solib.h"
125f8a3d 60#include "nat/linux-osdata.h"
6432734d 61#include "linux-tdep.h"
7dcd53a0 62#include "symfile.h"
5808517f
YQ
63#include "agent.h"
64#include "tracepoint.h"
87b0bb13 65#include "buffer.h"
6ecd4729 66#include "target-descriptions.h"
614c279d 67#include "filestuff.h"
77e371c0 68#include "objfiles.h"
efcbbd14
UW
69
70#ifndef SPUFS_MAGIC
71#define SPUFS_MAGIC 0x23c9b64e
72#endif
dba24537 73
1777feb0 74/* This comment documents high-level logic of this file.
8a77dff3
VP
75
76Waiting for events in sync mode
77===============================
78
79When waiting for an event in a specific thread, we just use waitpid, passing
80the specific pid, and not passing WNOHANG.
81
1777feb0 82When waiting for an event in all threads, waitpid is not quite good. Prior to
8a77dff3 83version 2.4, Linux can either wait for event in main thread, or in secondary
1777feb0 84threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might
8a77dff3
VP
85miss an event. The solution is to use non-blocking waitpid, together with
86sigsuspend. First, we use non-blocking waitpid to get an event in the main
1777feb0 87process, if any. Second, we use non-blocking waitpid with the __WCLONED
8a77dff3
VP
88flag to check for events in cloned processes. If nothing is found, we use
89sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something
90happened to a child process -- and SIGCHLD will be delivered both for events
91in main debugged process and in cloned processes. As soon as we know there's
3e43a32a
MS
92an event, we get back to calling nonblocking waitpid with and without
__WCLONE.
8a77dff3
VP
94
95Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
1777feb0 96so that we don't miss a signal. If SIGCHLD arrives in between, when it's
8a77dff3
VP
97blocked, the signal becomes pending and sigsuspend immediately
98notices it and returns.
99
100Waiting for events in async mode
101================================
102
7feb7d06
PA
103In async mode, GDB should always be ready to handle both user input
104and target events, so neither blocking waitpid nor sigsuspend are
105viable options. Instead, we should asynchronously notify the GDB main
106event loop whenever there's an unprocessed event from the target. We
107detect asynchronous target events by handling SIGCHLD signals. To
108notify the event loop about target events, the self-pipe trick is used
109--- a pipe is registered as waitable event source in the event loop,
110the event loop select/poll's on the read end of this pipe (as well on
111other event sources, e.g., stdin), and the SIGCHLD handler writes a
112byte to this pipe. This is more portable than relying on
113pselect/ppoll, since on kernels that lack those syscalls, libc
114emulates them with select/poll+sigprocmask, and that is racy
115(a.k.a. plain broken).
116
117Obviously, if we fail to notify the event loop if there's a target
118event, it's bad. OTOH, if we notify the event loop when there's no
119event from the target, linux_nat_wait will detect that there's no real
120event to report, and return event of type TARGET_WAITKIND_IGNORE.
121This is mostly harmless, but it will waste time and is better avoided.
122
123The main design point is that every time GDB is outside linux-nat.c,
124we have a SIGCHLD handler installed that is called when something
125happens to the target and notifies the GDB event loop. Whenever GDB
126core decides to handle the event, and calls into linux-nat.c, we
127process things as in sync mode, except that the we never block in
128sigsuspend.
129
130While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
132return quickly. E.g., in all-stop mode, before reporting to the core
133that an LWP hit a breakpoint, all LWPs are stopped by sending them
134SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
135Note that this is different from blocking indefinitely waiting for the
136next event --- here, we're already handling an event.
8a77dff3
VP
137
138Use of signals
139==============
140
141We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
142signal is not entirely significant; we just need for a signal to be delivered,
143so that we can intercept it. SIGSTOP's advantage is that it can not be
144blocked. A disadvantage is that it is not a real-time signal, so it can only
145be queued once; we do not keep track of other sources of SIGSTOP.
146
147Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
148use them, because they have special behavior when the signal is generated -
149not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
150kills the entire thread group.
151
152A delivered SIGSTOP would stop the entire thread group, not just the thread we
153tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
154cancel it (by PTRACE_CONT without passing SIGSTOP).
155
156We could use a real-time signal instead. This would solve those problems; we
157could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
158But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
159generates it, and there are races with trying to find a signal that is not
160blocked. */
a0ef4274 161
dba24537
AC
162#ifndef O_LARGEFILE
163#define O_LARGEFILE 0
164#endif
0274a8ce 165
10d6c8cd
DJ
166/* The single-threaded native GNU/Linux target_ops. We save a pointer for
167 the use of the multi-threaded target. */
168static struct target_ops *linux_ops;
f973ed9c 169static struct target_ops linux_ops_saved;
10d6c8cd 170
9f0bdab8 171/* The method to call, if any, when a new thread is attached. */
7b50312a
PA
172static void (*linux_nat_new_thread) (struct lwp_info *);
173
26cb8b7c
PA
174/* The method to call, if any, when a new fork is attached. */
175static linux_nat_new_fork_ftype *linux_nat_new_fork;
176
177/* The method to call, if any, when a process is no longer
178 attached. */
179static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;
180
7b50312a
PA
181/* Hook to call prior to resuming a thread. */
182static void (*linux_nat_prepare_to_resume) (struct lwp_info *);
9f0bdab8 183
5b009018
PA
184/* The method to call, if any, when the siginfo object needs to be
185 converted between the layout returned by ptrace, and the layout in
186 the architecture of the inferior. */
a5362b9a 187static int (*linux_nat_siginfo_fixup) (siginfo_t *,
5b009018
PA
188 gdb_byte *,
189 int);
190
ac264b3b
MS
191/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
192 Called by our to_xfer_partial. */
4ac248ca 193static target_xfer_partial_ftype *super_xfer_partial;
10d6c8cd 194
6a3cb8e8
PA
195/* The saved to_close method, inherited from inf-ptrace.c.
196 Called by our to_close. */
197static void (*super_close) (struct target_ops *);
198
ccce17b0 199static unsigned int debug_linux_nat;
920d2a44
AC
200static void
201show_debug_linux_nat (struct ui_file *file, int from_tty,
202 struct cmd_list_element *c, const char *value)
203{
204 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
205 value);
206}
d6b0e80f 207
ae087d01
DJ
208struct simple_pid_list
209{
210 int pid;
3d799a95 211 int status;
ae087d01
DJ
212 struct simple_pid_list *next;
213};
214struct simple_pid_list *stopped_pids;
215
3dd5b83d
PA
216/* Async mode support. */
217
b84876c2
PA
218/* The read/write ends of the pipe registered as waitable file in the
219 event loop. */
220static int linux_nat_event_pipe[2] = { -1, -1 };
221
198297aa
PA
222/* True if we're currently in async mode. */
223#define linux_is_async_p() (linux_nat_event_pipe[0] != -1)
224
7feb7d06 225/* Flush the event pipe. */
b84876c2 226
7feb7d06
PA
227static void
228async_file_flush (void)
b84876c2 229{
7feb7d06
PA
230 int ret;
231 char buf;
b84876c2 232
7feb7d06 233 do
b84876c2 234 {
7feb7d06 235 ret = read (linux_nat_event_pipe[0], &buf, 1);
b84876c2 236 }
7feb7d06 237 while (ret >= 0 || (ret == -1 && errno == EINTR));
b84876c2
PA
238}
239
7feb7d06
PA
240/* Put something (anything, doesn't matter what, or how much) in event
241 pipe, so that the select/poll in the event-loop realizes we have
242 something to process. */
252fbfc8 243
b84876c2 244static void
7feb7d06 245async_file_mark (void)
b84876c2 246{
7feb7d06 247 int ret;
b84876c2 248
7feb7d06
PA
249 /* It doesn't really matter what the pipe contains, as long we end
250 up with something in it. Might as well flush the previous
251 left-overs. */
252 async_file_flush ();
b84876c2 253
7feb7d06 254 do
b84876c2 255 {
7feb7d06 256 ret = write (linux_nat_event_pipe[1], "+", 1);
b84876c2 257 }
7feb7d06 258 while (ret == -1 && errno == EINTR);
b84876c2 259
7feb7d06
PA
260 /* Ignore EAGAIN. If the pipe is full, the event loop will already
261 be awakened anyway. */
b84876c2
PA
262}
263
7feb7d06
PA
264static int kill_lwp (int lwpid, int signo);
265
266static int stop_callback (struct lwp_info *lp, void *data);
2db9a427 267static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);
7feb7d06
PA
268
269static void block_child_signals (sigset_t *prev_mask);
270static void restore_child_signals_mask (sigset_t *prev_mask);
2277426b
PA
271
272struct lwp_info;
273static struct lwp_info *add_lwp (ptid_t ptid);
274static void purge_lwp_list (int pid);
4403d8e9 275static void delete_lwp (ptid_t ptid);
2277426b
PA
276static struct lwp_info *find_lwp_pid (ptid_t ptid);
277
8a99810d
PA
278static int lwp_status_pending_p (struct lwp_info *lp);
279
9c02b525
PA
280static int check_stopped_by_breakpoint (struct lwp_info *lp);
281static int sigtrap_is_event (int status);
282static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
283
ae087d01
DJ
284\f
285/* Trivial list manipulation functions to keep track of a list of
286 new stopped processes. */
287static void
3d799a95 288add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
ae087d01
DJ
289{
290 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
e0881a8e 291
ae087d01 292 new_pid->pid = pid;
3d799a95 293 new_pid->status = status;
ae087d01
DJ
294 new_pid->next = *listp;
295 *listp = new_pid;
296}
297
84636d28
PA
298static int
299in_pid_list_p (struct simple_pid_list *list, int pid)
300{
301 struct simple_pid_list *p;
302
303 for (p = list; p != NULL; p = p->next)
304 if (p->pid == pid)
305 return 1;
306 return 0;
307}
308
ae087d01 309static int
46a96992 310pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
ae087d01
DJ
311{
312 struct simple_pid_list **p;
313
314 for (p = listp; *p != NULL; p = &(*p)->next)
315 if ((*p)->pid == pid)
316 {
317 struct simple_pid_list *next = (*p)->next;
e0881a8e 318
46a96992 319 *statusp = (*p)->status;
ae087d01
DJ
320 xfree (*p);
321 *p = next;
322 return 1;
323 }
324 return 0;
325}
326
96d7229d 327/* Initialize ptrace warnings and check for supported ptrace
beed38b8
JB
328 features given PID.
329
330 ATTACHED should be nonzero iff we attached to the inferior. */
3993f6b1
DJ
331
332static void
beed38b8 333linux_init_ptrace (pid_t pid, int attached)
3993f6b1 334{
beed38b8 335 linux_enable_event_reporting (pid, attached);
96d7229d 336 linux_ptrace_init_warnings ();
4de4c07c
DJ
337}
338
6d8fd2b7 339static void
f045800c 340linux_child_post_attach (struct target_ops *self, int pid)
4de4c07c 341{
beed38b8 342 linux_init_ptrace (pid, 1);
4de4c07c
DJ
343}
344
10d6c8cd 345static void
2e97a79e 346linux_child_post_startup_inferior (struct target_ops *self, ptid_t ptid)
4de4c07c 347{
beed38b8 348 linux_init_ptrace (ptid_get_pid (ptid), 0);
4de4c07c
DJ
349}
350
4403d8e9
JK
351/* Return the number of known LWPs in the tgid given by PID. */
352
353static int
354num_lwps (int pid)
355{
356 int count = 0;
357 struct lwp_info *lp;
358
359 for (lp = lwp_list; lp; lp = lp->next)
360 if (ptid_get_pid (lp->ptid) == pid)
361 count++;
362
363 return count;
364}
365
366/* Call delete_lwp with prototype compatible for make_cleanup. */
367
368static void
369delete_lwp_cleanup (void *lp_voidp)
370{
371 struct lwp_info *lp = lp_voidp;
372
373 delete_lwp (lp->ptid);
374}
375
d83ad864
DB
376/* Target hook for follow_fork. On entry inferior_ptid must be the
377 ptid of the followed inferior. At return, inferior_ptid will be
378 unchanged. */
379
6d8fd2b7 380static int
07107ca6
LM
381linux_child_follow_fork (struct target_ops *ops, int follow_child,
382 int detach_fork)
3993f6b1 383{
d83ad864 384 if (!follow_child)
4de4c07c 385 {
6c95b8df 386 struct lwp_info *child_lp = NULL;
d83ad864
DB
387 int status = W_STOPCODE (0);
388 struct cleanup *old_chain;
389 int has_vforked;
390 int parent_pid, child_pid;
391
392 has_vforked = (inferior_thread ()->pending_follow.kind
393 == TARGET_WAITKIND_VFORKED);
394 parent_pid = ptid_get_lwp (inferior_ptid);
395 if (parent_pid == 0)
396 parent_pid = ptid_get_pid (inferior_ptid);
397 child_pid
398 = ptid_get_pid (inferior_thread ()->pending_follow.value.related_pid);
399
4de4c07c 400
1777feb0 401 /* We're already attached to the parent, by default. */
d83ad864
DB
402 old_chain = save_inferior_ptid ();
403 inferior_ptid = ptid_build (child_pid, child_pid, 0);
404 child_lp = add_lwp (inferior_ptid);
405 child_lp->stopped = 1;
406 child_lp->last_resume_kind = resume_stop;
4de4c07c 407
ac264b3b
MS
408 /* Detach new forked process? */
409 if (detach_fork)
f75c00e4 410 {
4403d8e9
JK
411 make_cleanup (delete_lwp_cleanup, child_lp);
412
4403d8e9
JK
413 if (linux_nat_prepare_to_resume != NULL)
414 linux_nat_prepare_to_resume (child_lp);
c077881a
HZ
415
416 /* When debugging an inferior in an architecture that supports
417 hardware single stepping on a kernel without commit
418 6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
419 process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
420 set if the parent process had them set.
421 To work around this, single step the child process
422 once before detaching to clear the flags. */
423
424 if (!gdbarch_software_single_step_p (target_thread_architecture
425 (child_lp->ptid)))
426 {
c077881a
HZ
427 linux_disable_event_reporting (child_pid);
428 if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
429 perror_with_name (_("Couldn't do single step"));
430 if (my_waitpid (child_pid, &status, 0) < 0)
431 perror_with_name (_("Couldn't wait vfork process"));
432 }
433
434 if (WIFSTOPPED (status))
9caaaa83
PA
435 {
436 int signo;
437
438 signo = WSTOPSIG (status);
439 if (signo != 0
440 && !signal_pass_state (gdb_signal_from_host (signo)))
441 signo = 0;
442 ptrace (PTRACE_DETACH, child_pid, 0, signo);
443 }
4403d8e9 444
d83ad864 445 /* Resets value of inferior_ptid to parent ptid. */
4403d8e9 446 do_cleanups (old_chain);
ac264b3b
MS
447 }
448 else
449 {
6c95b8df 450 /* Let the thread_db layer learn about this new process. */
2277426b 451 check_for_thread_db ();
ac264b3b 452 }
9016a515 453
d83ad864
DB
454 do_cleanups (old_chain);
455
9016a515
DJ
456 if (has_vforked)
457 {
3ced3da4 458 struct lwp_info *parent_lp;
6c95b8df 459
3ced3da4 460 parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
96d7229d 461 gdb_assert (linux_supports_tracefork () >= 0);
3ced3da4 462
96d7229d 463 if (linux_supports_tracevforkdone ())
9016a515 464 {
6c95b8df
PA
465 if (debug_linux_nat)
466 fprintf_unfiltered (gdb_stdlog,
467 "LCFF: waiting for VFORK_DONE on %d\n",
468 parent_pid);
3ced3da4 469 parent_lp->stopped = 1;
9016a515 470
6c95b8df
PA
471 /* We'll handle the VFORK_DONE event like any other
472 event, in target_wait. */
9016a515
DJ
473 }
474 else
475 {
476 /* We can't insert breakpoints until the child has
477 finished with the shared memory region. We need to
478 wait until that happens. Ideal would be to just
479 call:
480 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
481 - waitpid (parent_pid, &status, __WALL);
482 However, most architectures can't handle a syscall
483 being traced on the way out if it wasn't traced on
484 the way in.
485
486 We might also think to loop, continuing the child
487 until it exits or gets a SIGTRAP. One problem is
488 that the child might call ptrace with PTRACE_TRACEME.
489
490 There's no simple and reliable way to figure out when
491 the vforked child will be done with its copy of the
492 shared memory. We could step it out of the syscall,
493 two instructions, let it go, and then single-step the
494 parent once. When we have hardware single-step, this
495 would work; with software single-step it could still
496 be made to work but we'd have to be able to insert
497 single-step breakpoints in the child, and we'd have
498 to insert -just- the single-step breakpoint in the
499 parent. Very awkward.
500
501 In the end, the best we can do is to make sure it
502 runs for a little while. Hopefully it will be out of
503 range of any breakpoints we reinsert. Usually this
504 is only the single-step breakpoint at vfork's return
505 point. */
506
6c95b8df
PA
507 if (debug_linux_nat)
508 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
509 "LCFF: no VFORK_DONE "
510 "support, sleeping a bit\n");
6c95b8df 511
9016a515 512 usleep (10000);
9016a515 513
6c95b8df
PA
514 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
515 and leave it pending. The next linux_nat_resume call
516 will notice a pending event, and bypasses actually
517 resuming the inferior. */
3ced3da4
PA
518 parent_lp->status = 0;
519 parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
520 parent_lp->stopped = 1;
6c95b8df
PA
521
522 /* If we're in async mode, need to tell the event loop
523 there's something here to process. */
d9d41e78 524 if (target_is_async_p ())
6c95b8df
PA
525 async_file_mark ();
526 }
9016a515 527 }
4de4c07c 528 }
3993f6b1 529 else
4de4c07c 530 {
3ced3da4 531 struct lwp_info *child_lp;
4de4c07c 532
3ced3da4
PA
533 child_lp = add_lwp (inferior_ptid);
534 child_lp->stopped = 1;
25289eb2 535 child_lp->last_resume_kind = resume_stop;
6c95b8df 536
6c95b8df 537 /* Let the thread_db layer learn about this new process. */
ef29ce1a 538 check_for_thread_db ();
4de4c07c
DJ
539 }
540
541 return 0;
542}
543
4de4c07c 544\f
77b06cd7 545static int
a863b201 546linux_child_insert_fork_catchpoint (struct target_ops *self, int pid)
4de4c07c 547{
96d7229d 548 return !linux_supports_tracefork ();
3993f6b1
DJ
549}
550
eb73ad13 551static int
973fc227 552linux_child_remove_fork_catchpoint (struct target_ops *self, int pid)
eb73ad13
PA
553{
554 return 0;
555}
556
77b06cd7 557static int
3ecc7da0 558linux_child_insert_vfork_catchpoint (struct target_ops *self, int pid)
3993f6b1 559{
96d7229d 560 return !linux_supports_tracefork ();
3993f6b1
DJ
561}
562
eb73ad13 563static int
e98cf0cd 564linux_child_remove_vfork_catchpoint (struct target_ops *self, int pid)
eb73ad13
PA
565{
566 return 0;
567}
568
77b06cd7 569static int
ba025e51 570linux_child_insert_exec_catchpoint (struct target_ops *self, int pid)
3993f6b1 571{
96d7229d 572 return !linux_supports_tracefork ();
3993f6b1
DJ
573}
574
eb73ad13 575static int
758e29d2 576linux_child_remove_exec_catchpoint (struct target_ops *self, int pid)
eb73ad13
PA
577{
578 return 0;
579}
580
a96d9b2e 581static int
ff214e67
TT
582linux_child_set_syscall_catchpoint (struct target_ops *self,
583 int pid, int needed, int any_count,
a96d9b2e
SDJ
584 int table_size, int *table)
585{
96d7229d 586 if (!linux_supports_tracesysgood ())
77b06cd7
TJB
587 return 1;
588
a96d9b2e
SDJ
589 /* On GNU/Linux, we ignore the arguments. It means that we only
590 enable the syscall catchpoints, but do not disable them.
77b06cd7 591
a96d9b2e
SDJ
592 Also, we do not use the `table' information because we do not
593 filter system calls here. We let GDB do the logic for us. */
594 return 0;
595}
596
d6b0e80f
AC
597/* On GNU/Linux there are no real LWP's. The closest thing to LWP's
598 are processes sharing the same VM space. A multi-threaded process
599 is basically a group of such processes. However, such a grouping
600 is almost entirely a user-space issue; the kernel doesn't enforce
601 such a grouping at all (this might change in the future). In
602 general, we'll rely on the threads library (i.e. the GNU/Linux
603 Threads library) to provide such a grouping.
604
605 It is perfectly well possible to write a multi-threaded application
606 without the assistance of a threads library, by using the clone
607 system call directly. This module should be able to give some
608 rudimentary support for debugging such applications if developers
609 specify the CLONE_PTRACE flag in the clone system call, and are
610 using the Linux kernel 2.4 or above.
611
612 Note that there are some peculiarities in GNU/Linux that affect
613 this code:
614
615 - In general one should specify the __WCLONE flag to waitpid in
616 order to make it report events for any of the cloned processes
617 (and leave it out for the initial process). However, if a cloned
618 process has exited the exit status is only reported if the
619 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
620 we cannot use it since GDB must work on older systems too.
621
622 - When a traced, cloned process exits and is waited for by the
623 debugger, the kernel reassigns it to the original parent and
624 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
625 library doesn't notice this, which leads to the "zombie problem":
626 When debugged a multi-threaded process that spawns a lot of
627 threads will run out of processes, even if the threads exit,
628 because the "zombies" stay around. */
629
630/* List of known LWPs. */
9f0bdab8 631struct lwp_info *lwp_list;
d6b0e80f
AC
632\f
633
d6b0e80f
AC
634/* Original signal mask. */
635static sigset_t normal_mask;
636
637/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
638 _initialize_linux_nat. */
639static sigset_t suspend_mask;
640
7feb7d06
PA
641/* Signals to block to make that sigsuspend work. */
642static sigset_t blocked_mask;
643
644/* SIGCHLD action. */
645struct sigaction sigchld_action;
b84876c2 646
7feb7d06
PA
647/* Block child signals (SIGCHLD and linux threads signals), and store
648 the previous mask in PREV_MASK. */
84e46146 649
7feb7d06
PA
650static void
651block_child_signals (sigset_t *prev_mask)
652{
653 /* Make sure SIGCHLD is blocked. */
654 if (!sigismember (&blocked_mask, SIGCHLD))
655 sigaddset (&blocked_mask, SIGCHLD);
656
657 sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
658}
659
660/* Restore child signals mask, previously returned by
661 block_child_signals. */
662
663static void
664restore_child_signals_mask (sigset_t *prev_mask)
665{
666 sigprocmask (SIG_SETMASK, prev_mask, NULL);
667}
2455069d
UW
668
669/* Mask of signals to pass directly to the inferior. */
670static sigset_t pass_mask;
671
672/* Update signals to pass to the inferior. */
673static void
94bedb42
TT
674linux_nat_pass_signals (struct target_ops *self,
675 int numsigs, unsigned char *pass_signals)
2455069d
UW
676{
677 int signo;
678
679 sigemptyset (&pass_mask);
680
681 for (signo = 1; signo < NSIG; signo++)
682 {
2ea28649 683 int target_signo = gdb_signal_from_host (signo);
2455069d
UW
684 if (target_signo < numsigs && pass_signals[target_signo])
685 sigaddset (&pass_mask, signo);
686 }
687}
688
d6b0e80f
AC
689\f
690
691/* Prototypes for local functions. */
692static int stop_wait_callback (struct lwp_info *lp, void *data);
28439f5e 693static int linux_thread_alive (ptid_t ptid);
8dd27370 694static char *linux_child_pid_to_exec_file (struct target_ops *self, int pid);
20ba1ce6 695static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);
710151dd 696
d6b0e80f 697\f
d6b0e80f 698
7b50312a
PA
699/* Destroy and free LP. */
700
701static void
702lwp_free (struct lwp_info *lp)
703{
704 xfree (lp->arch_private);
705 xfree (lp);
706}
707
d90e17a7
PA
708/* Remove all LWPs belong to PID from the lwp list. */
709
710static void
711purge_lwp_list (int pid)
712{
713 struct lwp_info *lp, *lpprev, *lpnext;
714
715 lpprev = NULL;
716
717 for (lp = lwp_list; lp; lp = lpnext)
718 {
719 lpnext = lp->next;
720
721 if (ptid_get_pid (lp->ptid) == pid)
722 {
723 if (lp == lwp_list)
724 lwp_list = lp->next;
725 else
726 lpprev->next = lp->next;
727
7b50312a 728 lwp_free (lp);
d90e17a7
PA
729 }
730 else
731 lpprev = lp;
732 }
733}
734
26cb8b7c
PA
735/* Add the LWP specified by PTID to the list. PTID is the first LWP
736 in the process. Return a pointer to the structure describing the
737 new LWP.
738
739 This differs from add_lwp in that we don't let the arch specific
740 bits know about this new thread. Current clients of this callback
741 take the opportunity to install watchpoints in the new thread, and
742 we shouldn't do that for the first thread. If we're spawning a
743 child ("run"), the thread executes the shell wrapper first, and we
744 shouldn't touch it until it execs the program we want to debug.
745 For "attach", it'd be okay to call the callback, but it's not
746 necessary, because watchpoints can't yet have been inserted into
747 the inferior. */
d6b0e80f
AC
748
749static struct lwp_info *
26cb8b7c 750add_initial_lwp (ptid_t ptid)
d6b0e80f
AC
751{
752 struct lwp_info *lp;
753
dfd4cc63 754 gdb_assert (ptid_lwp_p (ptid));
d6b0e80f
AC
755
756 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
757
758 memset (lp, 0, sizeof (struct lwp_info));
759
25289eb2 760 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
761 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
762
763 lp->ptid = ptid;
dc146f7c 764 lp->core = -1;
d6b0e80f
AC
765
766 lp->next = lwp_list;
767 lwp_list = lp;
d6b0e80f 768
26cb8b7c
PA
769 return lp;
770}
771
772/* Add the LWP specified by PID to the list. Return a pointer to the
773 structure describing the new LWP. The LWP should already be
774 stopped. */
775
776static struct lwp_info *
777add_lwp (ptid_t ptid)
778{
779 struct lwp_info *lp;
780
781 lp = add_initial_lwp (ptid);
782
6e012a6c
PA
783 /* Let the arch specific bits know about this new thread. Current
784 clients of this callback take the opportunity to install
26cb8b7c
PA
785 watchpoints in the new thread. We don't do this for the first
786 thread though. See add_initial_lwp. */
787 if (linux_nat_new_thread != NULL)
7b50312a 788 linux_nat_new_thread (lp);
9f0bdab8 789
d6b0e80f
AC
790 return lp;
791}
792
793/* Remove the LWP specified by PID from the list. */
794
795static void
796delete_lwp (ptid_t ptid)
797{
798 struct lwp_info *lp, *lpprev;
799
800 lpprev = NULL;
801
802 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
803 if (ptid_equal (lp->ptid, ptid))
804 break;
805
806 if (!lp)
807 return;
808
d6b0e80f
AC
809 if (lpprev)
810 lpprev->next = lp->next;
811 else
812 lwp_list = lp->next;
813
7b50312a 814 lwp_free (lp);
d6b0e80f
AC
815}
816
817/* Return a pointer to the structure describing the LWP corresponding
818 to PID. If no corresponding LWP could be found, return NULL. */
819
820static struct lwp_info *
821find_lwp_pid (ptid_t ptid)
822{
823 struct lwp_info *lp;
824 int lwp;
825
dfd4cc63
LM
826 if (ptid_lwp_p (ptid))
827 lwp = ptid_get_lwp (ptid);
d6b0e80f 828 else
dfd4cc63 829 lwp = ptid_get_pid (ptid);
d6b0e80f
AC
830
831 for (lp = lwp_list; lp; lp = lp->next)
dfd4cc63 832 if (lwp == ptid_get_lwp (lp->ptid))
d6b0e80f
AC
833 return lp;
834
835 return NULL;
836}
837
838/* Call CALLBACK with its second argument set to DATA for every LWP in
839 the list. If CALLBACK returns 1 for a particular LWP, return a
840 pointer to the structure describing that LWP immediately.
841 Otherwise return NULL. */
842
843struct lwp_info *
d90e17a7
PA
844iterate_over_lwps (ptid_t filter,
845 int (*callback) (struct lwp_info *, void *),
846 void *data)
d6b0e80f
AC
847{
848 struct lwp_info *lp, *lpnext;
849
850 for (lp = lwp_list; lp; lp = lpnext)
851 {
852 lpnext = lp->next;
d90e17a7
PA
853
854 if (ptid_match (lp->ptid, filter))
855 {
856 if ((*callback) (lp, data))
857 return lp;
858 }
d6b0e80f
AC
859 }
860
861 return NULL;
862}
863
2277426b
PA
864/* Update our internal state when changing from one checkpoint to
865 another indicated by NEW_PTID. We can only switch single-threaded
866 applications, so we only create one new LWP, and the previous list
867 is discarded. */
f973ed9c
DJ
868
869void
870linux_nat_switch_fork (ptid_t new_ptid)
871{
872 struct lwp_info *lp;
873
dfd4cc63 874 purge_lwp_list (ptid_get_pid (inferior_ptid));
2277426b 875
f973ed9c
DJ
876 lp = add_lwp (new_ptid);
877 lp->stopped = 1;
e26af52f 878
2277426b
PA
879 /* This changes the thread's ptid while preserving the gdb thread
880 num. Also changes the inferior pid, while preserving the
881 inferior num. */
882 thread_change_ptid (inferior_ptid, new_ptid);
883
884 /* We've just told GDB core that the thread changed target id, but,
885 in fact, it really is a different thread, with different register
886 contents. */
887 registers_changed ();
e26af52f
DJ
888}
889
e26af52f
DJ
890/* Handle the exit of a single thread LP. */
891
892static void
893exit_lwp (struct lwp_info *lp)
894{
e09875d4 895 struct thread_info *th = find_thread_ptid (lp->ptid);
063bfe2e
VP
896
897 if (th)
e26af52f 898 {
17faa917
DJ
899 if (print_thread_events)
900 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
901
4f8d22e3 902 delete_thread (lp->ptid);
e26af52f
DJ
903 }
904
905 delete_lwp (lp->ptid);
906}
907
a0ef4274
DJ
/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.

   On return, *CLONED is set if waitpid needed __WCLONE to find the
   LWP, and *SIGNALLED is set if the initial stop signal was something
   other than SIGSTOP.  Neither output is written when its condition
   does not hold, so callers should pre-initialize them.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
			    int *signalled)
{
  pid_t new_pid, pid = ptid_get_lwp (ptid);
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
	 is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
	 (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      /* Only warn about the first (main) LWP; siblings being clones is
	 the normal case.  */
      if (first)
	warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
			    pid, status_to_str (status));
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Received %s after attaching\n",
			    status_to_str (status));
    }

  return status;
}
980
84636d28
PA
/* Attach to the LWP specified by PID.  Return 0 if successful, -1 if
   the new LWP could not be attached, or 1 if we're already auto
   attached to this thread, but haven't processed the
   PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
   its existance, without considering it an error.  */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwpid;

  gdb_assert (ptid_lwp_p (ptid));

  lp = find_lwp_pid (ptid);
  lwpid = ptid_get_lwp (ptid);

  /* We assume that we're already attached to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen.  */
  if (lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  if (linux_supports_tracefork ())
	    {
	      /* If we haven't stopped all threads when we get here,
		 we may have seen a thread listed in thread_db's list,
		 but not processed the PTRACE_EVENT_CLONE yet.  If
		 that's the case, ignore this new thread, and let
		 normal event handling discover it later.  */
	      if (in_pid_list_p (stopped_pids, lwpid))
		{
		  /* We've already seen this thread stop, but we
		     haven't seen the PTRACE_EVENT_CLONE extended
		     event yet.  */
		  if (debug_linux_nat)
		    fprintf_unfiltered (gdb_stdlog,
					"LLAL: attach failed, but already seen "
					"this thread %s stop\n",
					target_pid_to_str (ptid));
		  return 1;
		}
	      else
		{
		  int new_pid;
		  int status;

		  if (debug_linux_nat)
		    fprintf_unfiltered (gdb_stdlog,
					"LLAL: attach failed, and haven't seen "
					"this thread %s stop yet\n",
					target_pid_to_str (ptid));

		  /* We may or may not be attached to the LWP already.
		     Try waitpid on it.  If that errors, we're not
		     attached to the LWP yet.  Otherwise, we're
		     already attached.  */
		  gdb_assert (lwpid > 0);
		  new_pid = my_waitpid (lwpid, &status, WNOHANG);
		  if (new_pid == -1 && errno == ECHILD)
		    new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
		  if (new_pid != -1)
		    {
		      if (new_pid == 0)
			{
			  /* The child hasn't stopped for its initial
			     SIGSTOP stop yet.  */
			  if (debug_linux_nat)
			    fprintf_unfiltered (gdb_stdlog,
						"LLAL: child hasn't "
						"stopped yet\n");
			}
		      else if (WIFSTOPPED (status))
			{
			  /* Record the stop so the PTRACE_EVENT_CLONE
			     handler can pick it up later.  */
			  if (debug_linux_nat)
			    fprintf_unfiltered (gdb_stdlog,
						"LLAL: adding to stopped_pids\n");
			  add_to_pid_list (&stopped_pids, lwpid, status);
			}
		      return 1;
		    }
		}
	    }

	  /* If we fail to attach to the thread, issue a warning,
	     but continue.  One way this can happen is if thread
	     creation is interrupted; as of Linux kernel 2.6.19, a
	     bug may place threads in the thread list and then fail
	     to create them.  */
	  warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
		   safe_strerror (errno));
	  return -1;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
			    target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      /* NOTE(review): an LWP that exited during the post-attach wait is
	 also reported as 1 (silently ignored), same as the
	 not-yet-processed-clone case.  */
      if (!WIFSTOPPED (status))
	return 1;

      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->last_resume_kind = resume_stop;
      lp->cloned = cloned;
      lp->signalled = signalled;
      /* If the initial stop was not the expected SIGSTOP, keep the real
	 status pending so it gets reported to the core.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  lp->resumed = 1;
	  lp->status = status;
	}

      target_post_attach (ptid_get_lwp (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "LLAL: waitpid %s received %s\n",
			      target_pid_to_str (ptid),
			      status_to_str (status));
	}
    }

  return 0;
}
1113
/* Start a new inferior running EXEC_FILE with arguments ALLARGS and
   environment ENV, via the underlying ptrace target.  Address space
   randomization is disabled for the child according to the
   "disable-randomization" setting; the original personality is
   restored afterwards.  */

static void
linux_nat_create_inferior (struct target_ops *ops,
			   char *exec_file, char *allargs, char **env,
			   int from_tty)
{
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

  /* Make sure we report all signals during startup.  */
  linux_nat_pass_signals (ops, 0, NULL);

  linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);

  do_cleanups (restore_personality);
}
1132
8784d563
PA
/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  struct lwp_info *lp;

  /* Ignore LWPs we're already attached to.  */
  lp = find_lwp_pid (ptid);
  if (lp == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  /* Save errno before any later call can clobber it.  */
	  int err = errno;

	  /* Be quiet if we simply raced with the thread exiting.
	     EPERM is returned if the thread's task still exists, and
	     is marked as exited or zombie, as well as other
	     conditions, so in that case, confirm the status in
	     /proc/PID/status.  */
	  if (err == ESRCH
	      || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	    {
	      if (debug_linux_nat)
		{
		  fprintf_unfiltered (gdb_stdlog,
				      "Cannot attach to lwp %d: "
				      "thread is gone (%d: %s)\n",
				      lwpid, err, safe_strerror (err));
		}
	    }
	  else
	    {
	      warning (_("Cannot attach to lwp %d: %s"),
		       lwpid,
		       linux_ptrace_attach_fail_reason_string (ptid,
							       err));
	    }
	}
      else
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"PTRACE_ATTACH %s, 0, 0 (OK)\n",
				target_pid_to_str (ptid));

	  lp = add_lwp (ptid);
	  lp->cloned = 1;

	  /* The next time we wait for this LWP we'll see a SIGSTOP as
	     PTRACE_ATTACH brings it to a halt.  */
	  lp->signalled = 1;

	  /* We need to wait for a stop before being able to make the
	     next ptrace call on this LWP.  */
	  lp->must_set_ptrace_flags = 1;
	}

      /* Report "new LWP found" even if the attach failed; the caller
	 rescans /proc until no new LWPs appear.  */
      return 1;
    }
  return 0;
}
1199
/* Implementation of the "attach" target method.  Attaches to the
   process given in ARGS via the underlying ptrace layer, decorates the
   main thread with LWP info, waits for its initial stop, and then
   attaches to every other LWP of the process found via /proc.  On
   failure, re-throws the ptrace error augmented with a
   human-readable reason when one can be determined.  */

static void
linux_nat_attach (struct target_ops *ops, const char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;
  volatile struct gdb_exception ex;

  /* Make sure we report all signals during attach.  */
  linux_nat_pass_signals (ops, 0, NULL);

  TRY_CATCH (ex, RETURN_MASK_ERROR)
    {
      linux_ops->to_attach (ops, args, from_tty);
    }
  if (ex.reason < 0)
    {
      pid_t pid = parse_pid_to_attach (args);
      struct buffer buffer;
      char *message, *buffer_s;

      message = xstrdup (ex.message);
      make_cleanup (xfree, message);

      buffer_init (&buffer);
      linux_ptrace_attach_fail_reason (pid, &buffer);

      buffer_grow_str0 (&buffer, "");
      buffer_s = buffer_finish (&buffer);
      make_cleanup (xfree, buffer_s);

      /* Prepend the extra diagnosis (e.g. "process is already traced")
	 only when we actually found one.  */
      if (*buffer_s != '\0')
	throw_error (ex.error, "warning: %s\n%s", buffer_s, message);
      else
	throw_error (ex.error, "%s", message);
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = ptid_build (ptid_get_pid (inferior_ptid),
		     ptid_get_pid (inferior_ptid),
		     0);
  thread_change_ptid (inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_initial_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
				       &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal_ours ();
	  target_mourn_inferior ();
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum gdb_signal signo;

	  target_terminal_ours ();
	  target_mourn_inferior ();

	  signo = gdb_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 gdb_signal_to_name (signo),
		 gdb_signal_to_string (signo));
	}

      /* Neither stopped, exited, nor signalled: should be impossible.  */
      internal_error (__FILE__, __LINE__,
		      _("unexpected status %d for PID %ld"),
		      status, (long) ptid_get_lwp (ptid));
    }

  lp->stopped = 1;

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LNA: waitpid %ld, saving status %s\n",
			(long) ptid_get_pid (lp->ptid), status_to_str (status));

  lp->status = status;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  The inferior may be using raw clone instead of
     using pthreads.  But even if it is using pthreads, thread_db
     walks structures in the inferior's address space to find the list
     of threads/LWPs, and those structures may well be corrupted.
     Note that once thread_db is loaded, we'll still use it to list
     threads and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (ptid_get_pid (lp->ptid),
				  attach_proc_task_lwp_callback);

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}
1306
a0ef4274
DJ
/* Get pending status of LP.  On return, *STATUS holds a host wait
   status encoding the signal to deliver on the next resume/detach, or
   0 if there is nothing worth passing on.  Always returns 0.  */
static int
get_pending_status (struct lwp_info *lp, int *status)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to GDB_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    signo = GDB_SIGNAL_0;	/* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = gdb_signal_from_host (WSTOPSIG (lp->status));
  else if (non_stop && !is_executing (lp->ptid))
    {
      struct thread_info *tp = find_thread_ptid (lp->ptid);

      signo = tp->suspend.stop_signal;
    }
  else if (!non_stop)
    {
      struct target_waitstatus last;
      ptid_t last_ptid;

      get_last_target_status (&last_ptid, &last);

      /* In all-stop, only the last-reported thread's stop_signal is
	 meaningful here.  */
      if (ptid_get_lwp (lp->ptid) == ptid_get_lwp (last_ptid))
	{
	  struct thread_info *tp = find_thread_ptid (lp->ptid);

	  signo = tp->suspend.stop_signal;
	}
    }

  *status = 0;

  if (signo == GDB_SIGNAL_0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has no pending signal\n",
			    target_pid_to_str (lp->ptid));
    }
  else if (!signal_pass_state (signo))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s had signal %s, "
			    "but it is in no pass state\n",
			    target_pid_to_str (lp->ptid),
			    gdb_signal_to_string (signo));
    }
  else
    {
      *status = W_STOPCODE (gdb_signal_to_host (signo));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has pending signal %s\n",
			    target_pid_to_str (lp->ptid),
			    gdb_signal_to_string (signo));
    }

  return 0;
}
1390
d6b0e80f
AC
/* Callback for iterate_over_lwps: detach from LWP LP, passing on any
   pending signal.  The LWP whose id equals the overall process id is
   deliberately left attached; linux_nat_detach handles it last.
   Always returns 0 so iteration continues.  */

static int
detach_callback (struct lwp_info *lp, void *data)
{
  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  if (debug_linux_nat && lp->status)
    fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
			strsignal (WSTOPSIG (lp->status)),
			target_pid_to_str (lp->ptid));

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "DC: Sending SIGCONT to %s\n",
			    target_pid_to_str (lp->ptid));

      kill_lwp (ptid_get_lwp (lp->ptid), SIGCONT);
      lp->signalled = 0;
    }

  /* We don't actually detach from the LWP that has an id equal to the
     overall process id just yet.  */
  if (ptid_get_lwp (lp->ptid) != ptid_get_pid (lp->ptid))
    {
      int status = 0;

      /* Pass on any pending signal for this LWP.  */
      get_pending_status (lp, &status);

      if (linux_nat_prepare_to_resume != NULL)
	linux_nat_prepare_to_resume (lp);
      errno = 0;
      if (ptrace (PTRACE_DETACH, ptid_get_lwp (lp->ptid), 0,
		  WSTOPSIG (status)) < 0)
	error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
	       safe_strerror (errno));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "PTRACE_DETACH (%s, %s, 0) (OK)\n",
			    target_pid_to_str (lp->ptid),
			    strsignal (WSTOPSIG (status)));

      delete_lwp (lp->ptid);
    }

  return 0;
}
1441
/* Implementation of the "detach" target method.  Stops every LWP of
   the current inferior, detaches from all but the main LWP via
   detach_callback, then detaches from the main LWP — forwarding any
   pending signal through ARGS unless the user supplied an explicit
   signal.  With checkpoints active, switches to another fork instead
   of detaching outright.  */

static void
linux_nat_detach (struct target_ops *ops, const char *args, int from_tty)
{
  int pid;
  int status;
  struct lwp_info *main_lwp;

  pid = ptid_get_pid (inferior_ptid);

  /* Don't unregister from the event loop, as there may be other
     inferiors running. */

  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to sucessfully detach.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);

  iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);

  /* Only the initial process should be left right now.  */
  gdb_assert (num_lwps (ptid_get_pid (inferior_ptid)) == 1);

  main_lwp = find_lwp_pid (pid_to_ptid (pid));

  /* Pass on any pending signal for the last LWP.  */
  if ((args == NULL || *args == '\0')
      && get_pending_status (main_lwp, &status) != -1
      && WIFSTOPPED (status))
    {
      char *tem;

      /* Put the signal number in ARGS so that inf_ptrace_detach will
	 pass it along with PTRACE_DETACH.  */
      tem = alloca (8);
      xsnprintf (tem, 8, "%d", (int) WSTOPSIG (status));
      args = tem;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LND: Sending signal %s to %s\n",
			    args,
			    target_pid_to_str (main_lwp->ptid));
    }

  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (main_lwp);
  delete_lwp (main_lwp->ptid);

  if (forks_exist_p ())
    {
      /* Multi-fork case.  The current inferior_ptid is being detached
	 from, but there are other viable forks to debug.  Detach from
	 the current fork, and context-switch to the first
	 available.  */
      linux_fork_detach (args, from_tty);
    }
  else
    linux_ops->to_detach (ops, args, from_tty);
}
1502
8a99810d
PA
/* Resume execution of the inferior process.  If STEP is nonzero,
   single-step it.  If SIGNAL is nonzero, give it that signal.
   Clears the LWP's recorded stop reason and marks it running.  */

static void
linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
{
  lp->step = step;

  /* stop_pc doubles as the PC the LWP had when it was last resumed.
     We only presently need that if the LWP is stepped though (to
     handle the case of stepping a breakpoint instruction).  */
  if (step)
    {
      struct regcache *regcache = get_thread_regcache (lp->ptid);

      lp->stop_pc = regcache_read_pc (regcache);
    }
  else
    lp->stop_pc = 0;

  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (lp);
  linux_ops->to_resume (linux_ops, lp->ptid, step, signo);
  lp->stop_reason = LWP_STOPPED_BY_NO_REASON;
  lp->stopped = 0;
  /* The LWP is running again, so any cached registers are stale.  */
  registers_changed_ptid (lp->ptid);
}
1530
d6b0e80f
AC
/* Resume LP, unless it is not stopped, has a pending status to
   report, or is the parent of an in-flight vfork (which must stay
   halted until the child releases its address space).  If STEP is
   nonzero, single-step; SIGNO is the signal to deliver.  */

static void
resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
{
  if (lp->stopped)
    {
      struct inferior *inf = find_inferior_ptid (lp->ptid);

      if (inf->vfork_child != NULL)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Not resuming %s (vfork parent)\n",
				target_pid_to_str (lp->ptid));
	}
      else if (!lwp_status_pending_p (lp))
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Resuming sibling %s, %s, %s\n",
				target_pid_to_str (lp->ptid),
				(signo != GDB_SIGNAL_0
				 ? strsignal (gdb_signal_to_host (signo))
				 : "0"),
				step ? "step" : "resume");

	  linux_resume_one_lwp (lp, step, signo);
	}
      else
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Not resuming sibling %s (has pending)\n",
				target_pid_to_str (lp->ptid));
	}
    }
  else
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "RC: Not resuming sibling %s (not stopped)\n",
			    target_pid_to_str (lp->ptid));
    }
}
d6b0e80f 1576
8817a6f2
PA
/* Callback for iterate_over_lwps.  If LWP is EXCEPT, do nothing.
   Resume LWP with the last stop signal, if it is in pass state.
   The thread's recorded stop signal is consumed (reset to
   GDB_SIGNAL_0) so it is not delivered twice.  */

static int
linux_nat_resume_callback (struct lwp_info *lp, void *except)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  if (lp == except)
    return 0;

  if (lp->stopped)
    {
      struct thread_info *thread;

      thread = find_thread_ptid (lp->ptid);
      if (thread != NULL)
	{
	  signo = thread->suspend.stop_signal;
	  thread->suspend.stop_signal = GDB_SIGNAL_0;
	}
    }

  /* The pass-state filtering is done by resume_lwp's caller chain;
     here we just forward whatever signal was last recorded.  */
  resume_lwp (lp, 0, signo);
  return 0;
}
1603
/* Callback for iterate_over_lwps: mark LP as not resumed and
   expecting a stop.  Always returns 0 so iteration continues.  */

static int
resume_clear_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 0;
  lp->last_resume_kind = resume_stop;
  return 0;
}
1611
/* Callback for iterate_over_lwps: mark LP as resumed (continue).
   Always returns 0 so iteration continues.  */

static int
resume_set_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 1;
  lp->last_resume_kind = resume_continue;
  return 0;
}
1619
/* Implementation of the "resume" target method.  Resume the LWP(s)
   denoted by PTID (a pid-only or minus_one ptid resumes every LWP of
   that scope), single-stepping if STEP, delivering SIGNO.  If the
   event LWP already has a pending status, short-circuit: don't resume
   anything, just arrange for the event to be reported.  */

static void
linux_nat_resume (struct target_ops *ops,
		  ptid_t ptid, int step, enum gdb_signal signo)
{
  struct lwp_info *lp;
  int resume_many;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
			step ? "step" : "resume",
			target_pid_to_str (ptid),
			(signo != GDB_SIGNAL_0
			 ? strsignal (gdb_signal_to_host (signo)) : "0"),
			target_pid_to_str (inferior_ptid));

  /* A specific PTID means `step only this process id'.  */
  resume_many = (ptid_equal (minus_one_ptid, ptid)
		 || ptid_is_pid (ptid));

  /* Mark the lwps we're resuming as resumed.  */
  iterate_over_lwps (ptid, resume_set_callback, NULL);

  /* See if it's the current inferior that should be handled
     specially.  */
  if (resume_many)
    lp = find_lwp_pid (inferior_ptid);
  else
    lp = find_lwp_pid (ptid);
  gdb_assert (lp != NULL);

  /* Remember if we're stepping.  */
  lp->last_resume_kind = step ? resume_step : resume_continue;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  if (lp->status && WIFSTOPPED (lp->status))
    {
      /* A pending stop by a pass-state signal (when not stepping) is
	 not short-circuited; instead the signal is re-delivered on
	 resume and the pending status discarded.  */
      if (!lp->step
	  && WSTOPSIG (lp->status)
	  && sigismember (&pass_mask, WSTOPSIG (lp->status)))
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLR: Not short circuiting for ignored "
				"status 0x%x\n", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == GDB_SIGNAL_0);
	  signo = gdb_signal_from_host (WSTOPSIG (lp->status));
	  lp->status = 0;
	}
    }

  if (lwp_status_pending_p (lp))
    {
      /* FIXME: What should we do if we are supposed to continue
	 this thread with a signal?  */
      gdb_assert (signo == GDB_SIGNAL_0);

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLR: Short circuiting for status 0x%x\n",
			    lp->status);

      if (target_can_async_p ())
	{
	  target_async (inferior_event_handler, 0);
	  /* Tell the event loop we have something to process.  */
	  async_file_mark ();
	}
      return;
    }

  if (resume_many)
    iterate_over_lwps (ptid, linux_nat_resume_callback, lp);

  linux_resume_one_lwp (lp, step, signo);

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: %s %s, %s (resume event thread)\n",
			step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			target_pid_to_str (ptid),
			(signo != GDB_SIGNAL_0
			 ? strsignal (gdb_signal_to_host (signo)) : "0"));

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}
1717
/* Send signal SIGNO to the LWP with id LWPID; returns the result of
   the underlying tkill or kill call.  */

static int
kill_lwp (int lwpid, int signo)
{
#ifdef HAVE_TKILL_SYSCALL
  /* Prefer the tkill syscall: with nptl threads the signal must be
     directed at one specific kernel thread.  The first ENOSYS makes
     us fall back to plain kill permanently, which is correct for
     non-nptl (LinuxThreads) systems.  */
  static int tkill_unsupported;

  if (!tkill_unsupported)
    {
      int result;

      errno = 0;
      result = syscall (__NR_tkill, lwpid, signo);
      if (errno != ENOSYS)
	return result;

      /* This kernel has no tkill; remember so we don't retry.  */
      tkill_unsupported = 1;
    }
#endif

  return kill (lwpid, signo);
}
1745
ca2163eb
PA
/* Handle a GNU/Linux syscall trap wait response.  If we see a syscall
   event, check if the core is interested in it: if not, ignore the
   event, and keep waiting; otherwise, we need to toggle the LWP's
   syscall entry/exit status, since the ptrace event itself doesn't
   indicate it, and report the trap to higher layers.

   Returns 1 if the event was consumed here (caller keeps waiting),
   0 if it should be reported to the core via LP->waitstatus.  */

static int
linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
{
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
  int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);

  if (stopping)
    {
      /* If we're stopping threads, there's a SIGSTOP pending, which
	 makes it so that the LWP reports an immediate syscall return,
	 followed by the SIGSTOP.  Skip seeing that "return" using
	 PTRACE_CONT directly, and let stop_wait_callback collect the
	 SIGSTOP.  Later when the thread is resumed, a new syscall
	 entry event.  If we didn't do this (and returned 0), we'd
	 leave a syscall entry pending, and our caller, by using
	 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
	 itself.  Later, when the user re-resumes this LWP, we'd see
	 another syscall entry event and we'd mistake it for a return.

	 If stop_wait_callback didn't force the SIGSTOP out of the LWP
	 (leaving immediately with LWP->signalled set, without issuing
	 a PTRACE_CONT), it would still be problematic to leave this
	 syscall enter pending, as later when the thread is resumed,
	 it would then see the same syscall exit mentioned above,
	 followed by the delayed SIGSTOP, while the syscall didn't
	 actually get to execute.  It seems it would be even more
	 confusing to the user.  */

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: ignoring syscall %d "
			    "for LWP %ld (stopping threads), "
			    "resuming with PTRACE_CONT for SIGSTOP\n",
			    syscall_number,
			    ptid_get_lwp (lp->ptid));

      lp->syscall_state = TARGET_WAITKIND_IGNORE;
      ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
      lp->stopped = 0;
      return 1;
    }

  if (catch_syscall_enabled ())
    {
      /* Always update the entry/return state, even if this particular
	 syscall isn't interesting to the core now.  In async mode,
	 the user could install a new catchpoint for this syscall
	 between syscall enter/return, and we'll need to know to
	 report a syscall return if that happens.  */
      lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
			   ? TARGET_WAITKIND_SYSCALL_RETURN
			   : TARGET_WAITKIND_SYSCALL_ENTRY);

      if (catching_syscall_number (syscall_number))
	{
	  /* Alright, an event to report.  */
	  ourstatus->kind = lp->syscall_state;
	  ourstatus->value.syscall_number = syscall_number;

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHST: stopping for %s of syscall %d"
				" for LWP %ld\n",
				lp->syscall_state
				== TARGET_WAITKIND_SYSCALL_ENTRY
				? "entry" : "return",
				syscall_number,
				ptid_get_lwp (lp->ptid));
	  return 0;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: ignoring %s of syscall %d "
			    "for LWP %ld\n",
			    lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
			    ? "entry" : "return",
			    syscall_number,
			    ptid_get_lwp (lp->ptid));
    }
  else
    {
      /* If we had been syscall tracing, and hence used PT_SYSCALL
	 before on this LWP, it could happen that the user removes all
	 syscall catchpoints before we get to process this event.
	 There are two noteworthy issues here:

	 - When stopped at a syscall entry event, resuming with
	   PT_STEP still resumes executing the syscall and reports a
	   syscall return.

	 - Only PT_SYSCALL catches syscall enters.  If we last
	   single-stepped this thread, then this event can't be a
	   syscall enter.  If we last single-stepped this thread, this
	   has to be a syscall exit.

	 The points above mean that the next resume, be it PT_STEP or
	 PT_CONTINUE, can not trigger a syscall trace event.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: caught syscall event "
			    "with no syscall catchpoints."
			    " %d for LWP %ld, ignoring\n",
			    syscall_number,
			    ptid_get_lwp (lp->ptid));
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* The core isn't interested in this event.  For efficiency, avoid
     stopping all threads only to have the core resume them all again.
     Since we're not stopping threads, if we're still syscall tracing
     and not stepping, we can't use PTRACE_CONT here, as we'd miss any
     subsequent syscall.  Simply resume using the inf-ptrace layer,
     which knows when to use PT_SYSCALL or PT_CONTINUE.  */

  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
  return 1;
}
1871
3d799a95
DJ
1872/* Handle a GNU/Linux extended wait response. If we see a clone
1873 event, we need to add the new LWP to our list (and not report the
1874 trap to higher layers). This function returns non-zero if the
1875 event should be ignored and we should wait again. If STOPPING is
1876 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1877
1878static int
3d799a95
DJ
1879linux_handle_extended_wait (struct lwp_info *lp, int status,
1880 int stopping)
d6b0e80f 1881{
dfd4cc63 1882 int pid = ptid_get_lwp (lp->ptid);
3d799a95 1883 struct target_waitstatus *ourstatus = &lp->waitstatus;
89a5711c 1884 int event = linux_ptrace_get_extended_event (status);
d6b0e80f 1885
3d799a95
DJ
1886 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1887 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1888 {
3d799a95
DJ
1889 unsigned long new_pid;
1890 int ret;
1891
1892 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1893
3d799a95
DJ
1894 /* If we haven't already seen the new PID stop, wait for it now. */
1895 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1896 {
1897 /* The new child has a pending SIGSTOP. We can't affect it until it
1898 hits the SIGSTOP, but we're already attached. */
1899 ret = my_waitpid (new_pid, &status,
1900 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1901 if (ret == -1)
1902 perror_with_name (_("waiting for new child"));
1903 else if (ret != new_pid)
1904 internal_error (__FILE__, __LINE__,
1905 _("wait returned unexpected PID %d"), ret);
1906 else if (!WIFSTOPPED (status))
1907 internal_error (__FILE__, __LINE__,
1908 _("wait returned unexpected status 0x%x"), status);
1909 }
1910
3a3e9ee3 1911 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95 1912
26cb8b7c
PA
1913 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1914 {
1915 /* The arch-specific native code may need to know about new
1916 forks even if those end up never mapped to an
1917 inferior. */
1918 if (linux_nat_new_fork != NULL)
1919 linux_nat_new_fork (lp, new_pid);
1920 }
1921
2277426b 1922 if (event == PTRACE_EVENT_FORK
dfd4cc63 1923 && linux_fork_checkpointing_p (ptid_get_pid (lp->ptid)))
2277426b 1924 {
2277426b
PA
1925 /* Handle checkpointing by linux-fork.c here as a special
1926 case. We don't want the follow-fork-mode or 'catch fork'
1927 to interfere with this. */
1928
1929 /* This won't actually modify the breakpoint list, but will
1930 physically remove the breakpoints from the child. */
d80ee84f 1931 detach_breakpoints (ptid_build (new_pid, new_pid, 0));
2277426b
PA
1932
1933 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
1934 if (!find_fork_pid (new_pid))
1935 add_fork (new_pid);
2277426b
PA
1936
1937 /* Report as spurious, so that infrun doesn't want to follow
1938 this fork. We're actually doing an infcall in
1939 linux-fork.c. */
1940 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2277426b
PA
1941
1942 /* Report the stop to the core. */
1943 return 0;
1944 }
1945
3d799a95
DJ
1946 if (event == PTRACE_EVENT_FORK)
1947 ourstatus->kind = TARGET_WAITKIND_FORKED;
1948 else if (event == PTRACE_EVENT_VFORK)
1949 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 1950 else
3d799a95 1951 {
78768c4a
JK
1952 struct lwp_info *new_lp;
1953
3d799a95 1954 ourstatus->kind = TARGET_WAITKIND_IGNORE;
78768c4a 1955
3c4d7e12
PA
1956 if (debug_linux_nat)
1957 fprintf_unfiltered (gdb_stdlog,
1958 "LHEW: Got clone event "
1959 "from LWP %d, new child is LWP %ld\n",
1960 pid, new_pid);
1961
dfd4cc63 1962 new_lp = add_lwp (ptid_build (ptid_get_pid (lp->ptid), new_pid, 0));
3d799a95 1963 new_lp->cloned = 1;
4c28f408 1964 new_lp->stopped = 1;
d6b0e80f 1965
3d799a95
DJ
1966 if (WSTOPSIG (status) != SIGSTOP)
1967 {
1968 /* This can happen if someone starts sending signals to
1969 the new thread before it gets a chance to run, which
1970 have a lower number than SIGSTOP (e.g. SIGUSR1).
1971 This is an unlikely case, and harder to handle for
1972 fork / vfork than for clone, so we do not try - but
1973 we handle it for clone events here. We'll send
1974 the other signal on to the thread below. */
1975
1976 new_lp->signalled = 1;
1977 }
1978 else
79395f92
PA
1979 {
1980 struct thread_info *tp;
1981
1982 /* When we stop for an event in some other thread, and
1983 pull the thread list just as this thread has cloned,
1984 we'll have seen the new thread in the thread_db list
1985 before handling the CLONE event (glibc's
1986 pthread_create adds the new thread to the thread list
1987 before clone'ing, and has the kernel fill in the
1988 thread's tid on the clone call with
1989 CLONE_PARENT_SETTID). If that happened, and the core
1990 had requested the new thread to stop, we'll have
1991 killed it with SIGSTOP. But since SIGSTOP is not an
1992 RT signal, it can only be queued once. We need to be
1993 careful to not resume the LWP if we wanted it to
1994 stop. In that case, we'll leave the SIGSTOP pending.
a493e3e2 1995 It will later be reported as GDB_SIGNAL_0. */
79395f92
PA
1996 tp = find_thread_ptid (new_lp->ptid);
1997 if (tp != NULL && tp->stop_requested)
1998 new_lp->last_resume_kind = resume_stop;
1999 else
2000 status = 0;
2001 }
d6b0e80f 2002
2db9a427
PA
2003 /* If the thread_db layer is active, let it record the user
2004 level thread id and status, and add the thread to GDB's
2005 list. */
2006 if (!thread_db_notice_clone (lp->ptid, new_lp->ptid))
3d799a95 2007 {
2db9a427
PA
2008 /* The process is not using thread_db. Add the LWP to
2009 GDB's list. */
2010 target_post_attach (ptid_get_lwp (new_lp->ptid));
2011 add_thread (new_lp->ptid);
2012 }
4c28f408 2013
2db9a427
PA
2014 if (!stopping)
2015 {
2016 set_running (new_lp->ptid, 1);
2017 set_executing (new_lp->ptid, 1);
2018 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2019 resume_stop. */
2020 new_lp->last_resume_kind = resume_continue;
4c28f408
PA
2021 }
2022
79395f92
PA
2023 if (status != 0)
2024 {
2025 /* We created NEW_LP so it cannot yet contain STATUS. */
2026 gdb_assert (new_lp->status == 0);
2027
2028 /* Save the wait status to report later. */
2029 if (debug_linux_nat)
2030 fprintf_unfiltered (gdb_stdlog,
2031 "LHEW: waitpid of new LWP %ld, "
2032 "saving status %s\n",
dfd4cc63 2033 (long) ptid_get_lwp (new_lp->ptid),
79395f92
PA
2034 status_to_str (status));
2035 new_lp->status = status;
2036 }
2037
20ba1ce6 2038 new_lp->resumed = !stopping;
3d799a95
DJ
2039 return 1;
2040 }
2041
2042 return 0;
d6b0e80f
AC
2043 }
2044
3d799a95
DJ
2045 if (event == PTRACE_EVENT_EXEC)
2046 {
a75724bc
PA
2047 if (debug_linux_nat)
2048 fprintf_unfiltered (gdb_stdlog,
2049 "LHEW: Got exec event from LWP %ld\n",
dfd4cc63 2050 ptid_get_lwp (lp->ptid));
a75724bc 2051
3d799a95
DJ
2052 ourstatus->kind = TARGET_WAITKIND_EXECD;
2053 ourstatus->value.execd_pathname
8dd27370 2054 = xstrdup (linux_child_pid_to_exec_file (NULL, pid));
3d799a95 2055
8af756ef
PA
2056 /* The thread that execed must have been resumed, but, when a
2057 thread execs, it changes its tid to the tgid, and the old
2058 tgid thread might have not been resumed. */
2059 lp->resumed = 1;
6c95b8df
PA
2060 return 0;
2061 }
2062
2063 if (event == PTRACE_EVENT_VFORK_DONE)
2064 {
2065 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 2066 {
6c95b8df 2067 if (debug_linux_nat)
3e43a32a
MS
2068 fprintf_unfiltered (gdb_stdlog,
2069 "LHEW: Got expected PTRACE_EVENT_"
2070 "VFORK_DONE from LWP %ld: stopping\n",
dfd4cc63 2071 ptid_get_lwp (lp->ptid));
3d799a95 2072
6c95b8df
PA
2073 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2074 return 0;
3d799a95
DJ
2075 }
2076
6c95b8df 2077 if (debug_linux_nat)
3e43a32a
MS
2078 fprintf_unfiltered (gdb_stdlog,
2079 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
20ba1ce6 2080 "from LWP %ld: ignoring\n",
dfd4cc63 2081 ptid_get_lwp (lp->ptid));
6c95b8df 2082 return 1;
3d799a95
DJ
2083 }
2084
2085 internal_error (__FILE__, __LINE__,
2086 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2087}
2088
2089/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2090 exited. */
2091
2092static int
2093wait_lwp (struct lwp_info *lp)
2094{
2095 pid_t pid;
432b4d03 2096 int status = 0;
d6b0e80f 2097 int thread_dead = 0;
432b4d03 2098 sigset_t prev_mask;
d6b0e80f
AC
2099
2100 gdb_assert (!lp->stopped);
2101 gdb_assert (lp->status == 0);
2102
432b4d03
JK
2103 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2104 block_child_signals (&prev_mask);
2105
2106 for (;;)
d6b0e80f 2107 {
432b4d03
JK
2108 /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2109 was right and we should just call sigsuspend. */
2110
dfd4cc63 2111 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, WNOHANG);
d6b0e80f 2112 if (pid == -1 && errno == ECHILD)
dfd4cc63 2113 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, __WCLONE | WNOHANG);
a9f4bb21
PA
2114 if (pid == -1 && errno == ECHILD)
2115 {
2116 /* The thread has previously exited. We need to delete it
2117 now because, for some vendor 2.4 kernels with NPTL
2118 support backported, there won't be an exit event unless
2119 it is the main thread. 2.6 kernels will report an exit
2120 event for each thread that exits, as expected. */
2121 thread_dead = 1;
2122 if (debug_linux_nat)
2123 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2124 target_pid_to_str (lp->ptid));
2125 }
432b4d03
JK
2126 if (pid != 0)
2127 break;
2128
2129 /* Bugs 10970, 12702.
2130 Thread group leader may have exited in which case we'll lock up in
2131 waitpid if there are other threads, even if they are all zombies too.
2132 Basically, we're not supposed to use waitpid this way.
2133 __WCLONE is not applicable for the leader so we can't use that.
2134 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2135 process; it gets ESRCH both for the zombie and for running processes.
2136
2137 As a workaround, check if we're waiting for the thread group leader and
2138 if it's a zombie, and avoid calling waitpid if it is.
2139
2140 This is racy, what if the tgl becomes a zombie right after we check?
2141 Therefore always use WNOHANG with sigsuspend - it is equivalent to
5f572dec 2142 waiting waitpid but linux_proc_pid_is_zombie is safe this way. */
432b4d03 2143
dfd4cc63
LM
2144 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid)
2145 && linux_proc_pid_is_zombie (ptid_get_lwp (lp->ptid)))
d6b0e80f 2146 {
d6b0e80f
AC
2147 thread_dead = 1;
2148 if (debug_linux_nat)
432b4d03
JK
2149 fprintf_unfiltered (gdb_stdlog,
2150 "WL: Thread group leader %s vanished.\n",
d6b0e80f 2151 target_pid_to_str (lp->ptid));
432b4d03 2152 break;
d6b0e80f 2153 }
432b4d03
JK
2154
2155 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
2156 get invoked despite our caller had them intentionally blocked by
2157 block_child_signals. This is sensitive only to the loop of
2158 linux_nat_wait_1 and there if we get called my_waitpid gets called
2159 again before it gets to sigsuspend so we can safely let the handlers
2160 get executed here. */
2161
d36bf488
DE
2162 if (debug_linux_nat)
2163 fprintf_unfiltered (gdb_stdlog, "WL: about to sigsuspend\n");
432b4d03
JK
2164 sigsuspend (&suspend_mask);
2165 }
2166
2167 restore_child_signals_mask (&prev_mask);
2168
d6b0e80f
AC
2169 if (!thread_dead)
2170 {
dfd4cc63 2171 gdb_assert (pid == ptid_get_lwp (lp->ptid));
d6b0e80f
AC
2172
2173 if (debug_linux_nat)
2174 {
2175 fprintf_unfiltered (gdb_stdlog,
2176 "WL: waitpid %s received %s\n",
2177 target_pid_to_str (lp->ptid),
2178 status_to_str (status));
2179 }
d6b0e80f 2180
a9f4bb21
PA
2181 /* Check if the thread has exited. */
2182 if (WIFEXITED (status) || WIFSIGNALED (status))
2183 {
2184 thread_dead = 1;
2185 if (debug_linux_nat)
2186 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2187 target_pid_to_str (lp->ptid));
2188 }
d6b0e80f
AC
2189 }
2190
2191 if (thread_dead)
2192 {
e26af52f 2193 exit_lwp (lp);
d6b0e80f
AC
2194 return 0;
2195 }
2196
2197 gdb_assert (WIFSTOPPED (status));
8817a6f2 2198 lp->stopped = 1;
d6b0e80f 2199
8784d563
PA
2200 if (lp->must_set_ptrace_flags)
2201 {
2202 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
2203
2204 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
2205 lp->must_set_ptrace_flags = 0;
2206 }
2207
ca2163eb
PA
2208 /* Handle GNU/Linux's syscall SIGTRAPs. */
2209 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2210 {
2211 /* No longer need the sysgood bit. The ptrace event ends up
2212 recorded in lp->waitstatus if we care for it. We can carry
2213 on handling the event like a regular SIGTRAP from here
2214 on. */
2215 status = W_STOPCODE (SIGTRAP);
2216 if (linux_handle_syscall_trap (lp, 1))
2217 return wait_lwp (lp);
2218 }
2219
d6b0e80f 2220 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2221 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2222 && linux_is_extended_waitstatus (status))
d6b0e80f
AC
2223 {
2224 if (debug_linux_nat)
2225 fprintf_unfiltered (gdb_stdlog,
2226 "WL: Handling extended status 0x%06x\n",
2227 status);
20ba1ce6
PA
2228 linux_handle_extended_wait (lp, status, 1);
2229 return 0;
d6b0e80f
AC
2230 }
2231
2232 return status;
2233}
2234
2235/* Send a SIGSTOP to LP. */
2236
2237static int
2238stop_callback (struct lwp_info *lp, void *data)
2239{
2240 if (!lp->stopped && !lp->signalled)
2241 {
2242 int ret;
2243
2244 if (debug_linux_nat)
2245 {
2246 fprintf_unfiltered (gdb_stdlog,
2247 "SC: kill %s **<SIGSTOP>**\n",
2248 target_pid_to_str (lp->ptid));
2249 }
2250 errno = 0;
dfd4cc63 2251 ret = kill_lwp (ptid_get_lwp (lp->ptid), SIGSTOP);
d6b0e80f
AC
2252 if (debug_linux_nat)
2253 {
2254 fprintf_unfiltered (gdb_stdlog,
2255 "SC: lwp kill %d %s\n",
2256 ret,
2257 errno ? safe_strerror (errno) : "ERRNO-OK");
2258 }
2259
2260 lp->signalled = 1;
2261 gdb_assert (lp->status == 0);
2262 }
2263
2264 return 0;
2265}
2266
7b50312a
PA
2267/* Request a stop on LWP. */
2268
2269void
2270linux_stop_lwp (struct lwp_info *lwp)
2271{
2272 stop_callback (lwp, NULL);
2273}
2274
2db9a427
PA
2275/* See linux-nat.h */
2276
2277void
2278linux_stop_and_wait_all_lwps (void)
2279{
2280 /* Stop all LWP's ... */
2281 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
2282
2283 /* ... and wait until all of them have reported back that
2284 they're no longer running. */
2285 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
2286}
2287
2288/* See linux-nat.h */
2289
2290void
2291linux_unstop_all_lwps (void)
2292{
2293 iterate_over_lwps (minus_one_ptid,
2294 resume_stopped_resumed_lwps, &minus_one_ptid);
2295}
2296
57380f4e 2297/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2298
2299static int
57380f4e
DJ
2300linux_nat_has_pending_sigint (int pid)
2301{
2302 sigset_t pending, blocked, ignored;
57380f4e
DJ
2303
2304 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2305
2306 if (sigismember (&pending, SIGINT)
2307 && !sigismember (&ignored, SIGINT))
2308 return 1;
2309
2310 return 0;
2311}
2312
2313/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2314
2315static int
2316set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2317{
57380f4e
DJ
2318 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2319 flag to consume the next one. */
2320 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2321 && WSTOPSIG (lp->status) == SIGINT)
2322 lp->status = 0;
2323 else
2324 lp->ignore_sigint = 1;
2325
2326 return 0;
2327}
2328
2329/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2330 This function is called after we know the LWP has stopped; if the LWP
2331 stopped before the expected SIGINT was delivered, then it will never have
2332 arrived. Also, if the signal was delivered to a shared queue and consumed
2333 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2334
57380f4e
DJ
2335static void
2336maybe_clear_ignore_sigint (struct lwp_info *lp)
2337{
2338 if (!lp->ignore_sigint)
2339 return;
2340
dfd4cc63 2341 if (!linux_nat_has_pending_sigint (ptid_get_lwp (lp->ptid)))
57380f4e
DJ
2342 {
2343 if (debug_linux_nat)
2344 fprintf_unfiltered (gdb_stdlog,
2345 "MCIS: Clearing bogus flag for %s\n",
2346 target_pid_to_str (lp->ptid));
2347 lp->ignore_sigint = 0;
2348 }
2349}
2350
ebec9a0f
PA
2351/* Fetch the possible triggered data watchpoint info and store it in
2352 LP.
2353
2354 On some archs, like x86, that use debug registers to set
2355 watchpoints, it's possible that the way to know which watched
2356 address trapped, is to check the register that is used to select
2357 which address to watch. Problem is, between setting the watchpoint
2358 and reading back which data address trapped, the user may change
2359 the set of watchpoints, and, as a consequence, GDB changes the
2360 debug registers in the inferior. To avoid reading back a stale
2361 stopped-data-address when that happens, we cache in LP the fact
2362 that a watchpoint trapped, and the corresponding data address, as
2363 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2364 registers meanwhile, we have the cached data we can rely on. */
2365
9c02b525
PA
2366static int
2367check_stopped_by_watchpoint (struct lwp_info *lp)
ebec9a0f
PA
2368{
2369 struct cleanup *old_chain;
2370
2371 if (linux_ops->to_stopped_by_watchpoint == NULL)
9c02b525 2372 return 0;
ebec9a0f
PA
2373
2374 old_chain = save_inferior_ptid ();
2375 inferior_ptid = lp->ptid;
2376
9c02b525 2377 if (linux_ops->to_stopped_by_watchpoint (linux_ops))
ebec9a0f 2378 {
9c02b525
PA
2379 lp->stop_reason = LWP_STOPPED_BY_WATCHPOINT;
2380
ebec9a0f
PA
2381 if (linux_ops->to_stopped_data_address != NULL)
2382 lp->stopped_data_address_p =
2383 linux_ops->to_stopped_data_address (&current_target,
2384 &lp->stopped_data_address);
2385 else
2386 lp->stopped_data_address_p = 0;
2387 }
2388
2389 do_cleanups (old_chain);
9c02b525
PA
2390
2391 return lp->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
2392}
2393
2394/* Called when the LWP stopped for a trap that could be explained by a
2395 watchpoint or a breakpoint. */
2396
2397static void
2398save_sigtrap (struct lwp_info *lp)
2399{
2400 gdb_assert (lp->stop_reason == LWP_STOPPED_BY_NO_REASON);
2401 gdb_assert (lp->status != 0);
2402
2403 if (check_stopped_by_watchpoint (lp))
2404 return;
2405
2406 if (linux_nat_status_is_event (lp->status))
2407 check_stopped_by_breakpoint (lp);
ebec9a0f
PA
2408}
2409
9c02b525 2410/* Returns true if the LWP had stopped for a watchpoint. */
ebec9a0f
PA
2411
2412static int
6a109b6b 2413linux_nat_stopped_by_watchpoint (struct target_ops *ops)
ebec9a0f
PA
2414{
2415 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2416
2417 gdb_assert (lp != NULL);
2418
9c02b525 2419 return lp->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
ebec9a0f
PA
2420}
2421
2422static int
2423linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2424{
2425 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2426
2427 gdb_assert (lp != NULL);
2428
2429 *addr_p = lp->stopped_data_address;
2430
2431 return lp->stopped_data_address_p;
2432}
2433
26ab7092
JK
2434/* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
2435
2436static int
2437sigtrap_is_event (int status)
2438{
2439 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2440}
2441
26ab7092
JK
2442/* Set alternative SIGTRAP-like events recognizer. If
2443 breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
2444 applied. */
2445
2446void
2447linux_nat_set_status_is_event (struct target_ops *t,
2448 int (*status_is_event) (int status))
2449{
2450 linux_nat_status_is_event = status_is_event;
2451}
2452
57380f4e
DJ
2453/* Wait until LP is stopped. */
2454
2455static int
2456stop_wait_callback (struct lwp_info *lp, void *data)
2457{
c9657e70 2458 struct inferior *inf = find_inferior_ptid (lp->ptid);
6c95b8df
PA
2459
2460 /* If this is a vfork parent, bail out, it is not going to report
2461 any SIGSTOP until the vfork is done with. */
2462 if (inf->vfork_child != NULL)
2463 return 0;
2464
d6b0e80f
AC
2465 if (!lp->stopped)
2466 {
2467 int status;
2468
2469 status = wait_lwp (lp);
2470 if (status == 0)
2471 return 0;
2472
57380f4e
DJ
2473 if (lp->ignore_sigint && WIFSTOPPED (status)
2474 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2475 {
57380f4e 2476 lp->ignore_sigint = 0;
d6b0e80f
AC
2477
2478 errno = 0;
dfd4cc63 2479 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
8817a6f2 2480 lp->stopped = 0;
d6b0e80f
AC
2481 if (debug_linux_nat)
2482 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2483 "PTRACE_CONT %s, 0, 0 (%s) "
2484 "(discarding SIGINT)\n",
d6b0e80f
AC
2485 target_pid_to_str (lp->ptid),
2486 errno ? safe_strerror (errno) : "OK");
2487
57380f4e 2488 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2489 }
2490
57380f4e
DJ
2491 maybe_clear_ignore_sigint (lp);
2492
d6b0e80f
AC
2493 if (WSTOPSIG (status) != SIGSTOP)
2494 {
e5ef252a 2495 /* The thread was stopped with a signal other than SIGSTOP. */
7feb7d06 2496
e5ef252a
PA
2497 if (debug_linux_nat)
2498 fprintf_unfiltered (gdb_stdlog,
2499 "SWC: Pending event %s in %s\n",
2500 status_to_str ((int) status),
2501 target_pid_to_str (lp->ptid));
2502
2503 /* Save the sigtrap event. */
2504 lp->status = status;
e5ef252a 2505 gdb_assert (lp->signalled);
9c02b525 2506 save_sigtrap (lp);
d6b0e80f
AC
2507 }
2508 else
2509 {
2510 /* We caught the SIGSTOP that we intended to catch, so
2511 there's no SIGSTOP pending. */
e5ef252a
PA
2512
2513 if (debug_linux_nat)
2514 fprintf_unfiltered (gdb_stdlog,
2515 "SWC: Delayed SIGSTOP caught for %s.\n",
2516 target_pid_to_str (lp->ptid));
2517
e5ef252a
PA
2518 /* Reset SIGNALLED only after the stop_wait_callback call
2519 above as it does gdb_assert on SIGNALLED. */
d6b0e80f
AC
2520 lp->signalled = 0;
2521 }
2522 }
2523
2524 return 0;
2525}
2526
9c02b525
PA
2527/* Return non-zero if LP has a wait status pending. Discard the
2528 pending event and resume the LWP if the event that originally
2529 caused the stop became uninteresting. */
d6b0e80f
AC
2530
2531static int
2532status_callback (struct lwp_info *lp, void *data)
2533{
2534 /* Only report a pending wait status if we pretend that this has
2535 indeed been resumed. */
ca2163eb
PA
2536 if (!lp->resumed)
2537 return 0;
2538
9c02b525
PA
2539 if (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT
2540 || lp->stop_reason == LWP_STOPPED_BY_HW_BREAKPOINT)
2541 {
2542 struct regcache *regcache = get_thread_regcache (lp->ptid);
2543 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2544 CORE_ADDR pc;
2545 int discard = 0;
2546
2547 gdb_assert (lp->status != 0);
2548
2549 pc = regcache_read_pc (regcache);
2550
2551 if (pc != lp->stop_pc)
2552 {
2553 if (debug_linux_nat)
2554 fprintf_unfiltered (gdb_stdlog,
2555 "SC: PC of %s changed. was=%s, now=%s\n",
2556 target_pid_to_str (lp->ptid),
2557 paddress (target_gdbarch (), lp->stop_pc),
2558 paddress (target_gdbarch (), pc));
2559 discard = 1;
2560 }
2561 else if (!breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2562 {
2563 if (debug_linux_nat)
2564 fprintf_unfiltered (gdb_stdlog,
2565 "SC: previous breakpoint of %s, at %s gone\n",
2566 target_pid_to_str (lp->ptid),
2567 paddress (target_gdbarch (), lp->stop_pc));
2568
2569 discard = 1;
2570 }
2571
2572 if (discard)
2573 {
2574 if (debug_linux_nat)
2575 fprintf_unfiltered (gdb_stdlog,
2576 "SC: pending event of %s cancelled.\n",
2577 target_pid_to_str (lp->ptid));
2578
2579 lp->status = 0;
2580 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2581 return 0;
2582 }
2583 return 1;
2584 }
2585
8a99810d 2586 return lwp_status_pending_p (lp);
d6b0e80f
AC
2587}
2588
2589/* Return non-zero if LP isn't stopped. */
2590
2591static int
2592running_callback (struct lwp_info *lp, void *data)
2593{
25289eb2 2594 return (!lp->stopped
8a99810d 2595 || (lwp_status_pending_p (lp) && lp->resumed));
d6b0e80f
AC
2596}
2597
2598/* Count the LWP's that have had events. */
2599
2600static int
2601count_events_callback (struct lwp_info *lp, void *data)
2602{
2603 int *count = data;
2604
2605 gdb_assert (count != NULL);
2606
9c02b525
PA
2607 /* Select only resumed LWPs that have an event pending. */
2608 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2609 (*count)++;
2610
2611 return 0;
2612}
2613
2614/* Select the LWP (if any) that is currently being single-stepped. */
2615
2616static int
2617select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2618{
25289eb2
PA
2619 if (lp->last_resume_kind == resume_step
2620 && lp->status != 0)
d6b0e80f
AC
2621 return 1;
2622 else
2623 return 0;
2624}
2625
8a99810d
PA
2626/* Returns true if LP has a status pending. */
2627
2628static int
2629lwp_status_pending_p (struct lwp_info *lp)
2630{
2631 /* We check for lp->waitstatus in addition to lp->status, because we
2632 can have pending process exits recorded in lp->status and
2633 W_EXITCODE(0,0) happens to be 0. */
2634 return lp->status != 0 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE;
2635}
2636
d6b0e80f
AC
2637/* Select the Nth LWP that has had a SIGTRAP event. */
2638
2639static int
2640select_event_lwp_callback (struct lwp_info *lp, void *data)
2641{
2642 int *selector = data;
2643
2644 gdb_assert (selector != NULL);
2645
9c02b525
PA
2646 /* Select only resumed LWPs that have an event pending. */
2647 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2648 if ((*selector)-- == 0)
2649 return 1;
2650
2651 return 0;
2652}
2653
9c02b525
PA
2654/* Called when the LWP got a signal/trap that could be explained by a
2655 software or hardware breakpoint. */
2656
710151dd 2657static int
9c02b525 2658check_stopped_by_breakpoint (struct lwp_info *lp)
710151dd
PA
2659{
2660 /* Arrange for a breakpoint to be hit again later. We don't keep
2661 the SIGTRAP status and don't forward the SIGTRAP signal to the
2662 LWP. We will handle the current event, eventually we will resume
2663 this LWP, and this breakpoint will trap again.
2664
2665 If we do not do this, then we run the risk that the user will
2666 delete or disable the breakpoint, but the LWP will have already
2667 tripped on it. */
2668
515630c5
UW
2669 struct regcache *regcache = get_thread_regcache (lp->ptid);
2670 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2671 CORE_ADDR pc;
9c02b525
PA
2672 CORE_ADDR sw_bp_pc;
2673
2674 pc = regcache_read_pc (regcache);
2675 sw_bp_pc = pc - target_decr_pc_after_break (gdbarch);
515630c5 2676
9c02b525
PA
2677 if ((!lp->step || lp->stop_pc == sw_bp_pc)
2678 && software_breakpoint_inserted_here_p (get_regcache_aspace (regcache),
2679 sw_bp_pc))
710151dd 2680 {
9c02b525
PA
2681 /* The LWP was either continued, or stepped a software
2682 breakpoint instruction. */
710151dd
PA
2683 if (debug_linux_nat)
2684 fprintf_unfiltered (gdb_stdlog,
9c02b525 2685 "CB: Push back software breakpoint for %s\n",
710151dd
PA
2686 target_pid_to_str (lp->ptid));
2687
2688 /* Back up the PC if necessary. */
9c02b525
PA
2689 if (pc != sw_bp_pc)
2690 regcache_write_pc (regcache, sw_bp_pc);
515630c5 2691
9c02b525
PA
2692 lp->stop_pc = sw_bp_pc;
2693 lp->stop_reason = LWP_STOPPED_BY_SW_BREAKPOINT;
710151dd
PA
2694 return 1;
2695 }
710151dd 2696
9c02b525
PA
2697 if (hardware_breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2698 {
2699 if (debug_linux_nat)
2700 fprintf_unfiltered (gdb_stdlog,
2701 "CB: Push back hardware breakpoint for %s\n",
2702 target_pid_to_str (lp->ptid));
d6b0e80f 2703
9c02b525
PA
2704 lp->stop_pc = pc;
2705 lp->stop_reason = LWP_STOPPED_BY_HW_BREAKPOINT;
2706 return 1;
2707 }
d6b0e80f
AC
2708
2709 return 0;
2710}
2711
2712/* Select one LWP out of those that have events pending. */
2713
2714static void
d90e17a7 2715select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2716{
2717 int num_events = 0;
2718 int random_selector;
9c02b525 2719 struct lwp_info *event_lp = NULL;
d6b0e80f 2720
ac264b3b 2721 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2722 (*orig_lp)->status = *status;
2723
9c02b525
PA
2724 /* In all-stop, give preference to the LWP that is being
2725 single-stepped. There will be at most one, and it will be the
2726 LWP that the core is most interested in. If we didn't do this,
2727 then we'd have to handle pending step SIGTRAPs somehow in case
2728 the core later continues the previously-stepped thread, as
2729 otherwise we'd report the pending SIGTRAP then, and the core, not
2730 having stepped the thread, wouldn't understand what the trap was
2731 for, and therefore would report it to the user as a random
2732 signal. */
2733 if (!non_stop)
d6b0e80f 2734 {
9c02b525
PA
2735 event_lp = iterate_over_lwps (filter,
2736 select_singlestep_lwp_callback, NULL);
2737 if (event_lp != NULL)
2738 {
2739 if (debug_linux_nat)
2740 fprintf_unfiltered (gdb_stdlog,
2741 "SEL: Select single-step %s\n",
2742 target_pid_to_str (event_lp->ptid));
2743 }
d6b0e80f 2744 }
9c02b525
PA
2745
2746 if (event_lp == NULL)
d6b0e80f 2747 {
9c02b525 2748 /* Pick one at random, out of those which have had events. */
d6b0e80f 2749
9c02b525 2750 /* First see how many events we have. */
d90e17a7 2751 iterate_over_lwps (filter, count_events_callback, &num_events);
d6b0e80f 2752
9c02b525
PA
2753 /* Now randomly pick a LWP out of those that have had
2754 events. */
d6b0e80f
AC
2755 random_selector = (int)
2756 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2757
2758 if (debug_linux_nat && num_events > 1)
2759 fprintf_unfiltered (gdb_stdlog,
9c02b525 2760 "SEL: Found %d events, selecting #%d\n",
d6b0e80f
AC
2761 num_events, random_selector);
2762
d90e17a7
PA
2763 event_lp = iterate_over_lwps (filter,
2764 select_event_lwp_callback,
d6b0e80f
AC
2765 &random_selector);
2766 }
2767
2768 if (event_lp != NULL)
2769 {
2770 /* Switch the event LWP. */
2771 *orig_lp = event_lp;
2772 *status = event_lp->status;
2773 }
2774
2775 /* Flush the wait status for the event LWP. */
2776 (*orig_lp)->status = 0;
2777}
2778
2779/* Return non-zero if LP has been resumed. */
2780
2781static int
2782resumed_callback (struct lwp_info *lp, void *data)
2783{
2784 return lp->resumed;
2785}
2786
/* Stop an active thread, verify it still exists, then resume it.  If
   the thread ends up with a pending status, then it is not resumed,
   and *DATA (really a pointer to int), is set.
   NOTE(review): this implementation never writes through DATA; the
   *DATA claim above looks stale -- confirm against callers.  */

static int
stop_and_resume_callback (struct lwp_info *lp, void *data)
{
  /* Only threads that are currently running need the stop/resume
     cycle; already-stopped threads are left alone.  */
  if (!lp->stopped)
    {
      /* Save the ptid before stopping: stop_wait_callback may reap
         the thread, invalidating LP.  */
      ptid_t ptid = lp->ptid;

      stop_callback (lp, NULL);
      stop_wait_callback (lp, NULL);

      /* Resume if the lwp still exists, and the core wanted it
         running.  */
      lp = find_lwp_pid (ptid);
      if (lp != NULL)
        {
          if (lp->last_resume_kind == resume_stop
              && !lwp_status_pending_p (lp))
            {
              /* The core wanted the LWP to stop.  Even if it stopped
                 cleanly (with SIGSTOP), leave the event pending.  */
              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "SARC: core wanted LWP %ld stopped "
                                    "(leaving SIGSTOP pending)\n",
                                    ptid_get_lwp (lp->ptid));
              /* Synthesize a SIGSTOP stop status so the event is
                 reported later.  */
              lp->status = W_STOPCODE (SIGSTOP);
            }

          if (!lwp_status_pending_p (lp))
            {
              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "SARC: re-resuming LWP %ld\n",
                                    ptid_get_lwp (lp->ptid));
              resume_lwp (lp, lp->step, GDB_SIGNAL_0);
            }
          else
            {
              /* A status arrived while stopping; keep the thread
                 stopped so the event can be reported.  */
              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "SARC: not re-resuming LWP %ld "
                                    "(has pending)\n",
                                    ptid_get_lwp (lp->ptid));
            }
        }
    }
  return 0;
}
2839
/* Check if we should go on and pass this event to common code.
   Return the affected lwp if we are, or NULL otherwise.  LWPID is the
   LWP waitpid reported the event for; STATUS is its raw wait status.
   Events may be discarded (NULL return) when they are internal
   bookkeeping: delayed SIGSTOP/SIGINT, syscall/extended ptrace traps
   handled elsewhere, exits of non-last LWPs, or pass-through
   signals.  */

static struct lwp_info *
linux_nat_filter_event (int lwpid, int status)
{
  struct lwp_info *lp;
  int event = linux_ptrace_get_extended_event (status);

  lp = find_lwp_pid (pid_to_ptid (lwpid));

  /* Check for stop events reported by a process we didn't already
     know about - anything not already in our LWP list.

     If we're expecting to receive stopped processes after
     fork, vfork, and clone events, then we'll just add the
     new one to our list and go back to waiting for the event
     to be reported - the stopped process might be returned
     from waitpid before or after the event is.

     But note the case of a non-leader thread exec'ing after the
     leader having exited, and gone from our lists.  The non-leader
     thread changes its tid to the tgid.  */

  if (WIFSTOPPED (status) && lp == NULL
      && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
    {
      /* A multi-thread exec after we had seen the leader exiting.  */
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: Re-adding thread group leader LWP %d.\n",
                            lwpid);

      lp = add_lwp (ptid_build (lwpid, lwpid, 0));
      lp->stopped = 1;
      lp->resumed = 1;
      add_thread (lp->ptid);
    }

  if (WIFSTOPPED (status) && !lp)
    {
      /* An unknown LWP stopped; remember the status so it can be
         matched up when the corresponding clone/fork event arrives.  */
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LHEW: saving LWP %ld status %s in stopped_pids list\n",
                            (long) lwpid, status_to_str (status));
      add_to_pid_list (&stopped_pids, lwpid, status);
      return NULL;
    }

  /* Make sure we don't report an event for the exit of an LWP not in
     our list, i.e. not part of the current process.  This can happen
     if we detach from a program we originally forked and then it
     exits.  */
  if (!WIFSTOPPED (status) && !lp)
    return NULL;

  /* This LWP is stopped now.  (And if dead, this prevents it from
     ever being continued.)  */
  lp->stopped = 1;

  if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
    {
      /* First stop after attach/clone: enable the ptrace event
         reporting options on this LWP now that it is stopped.  */
      struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));

      linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
      lp->must_set_ptrace_flags = 0;
    }

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
         recorded in lp->waitstatus if we care for it.  We can carry
         on handling the event like a regular SIGTRAP from here
         on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 0))
        return NULL;
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && linux_is_extended_waitstatus (status))
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: Handling extended status 0x%06x\n",
                            status);
      if (linux_handle_extended_wait (lp, status, 0))
        return NULL;
    }

  /* Check if the thread has exited.  */
  if (WIFEXITED (status) || WIFSIGNALED (status))
    {
      if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
        {
          /* If this is the main thread, we must stop all threads and
             verify if they are still alive.  This is because in the
             nptl thread model on Linux 2.4, there is no signal issued
             for exiting LWPs other than the main thread.  We only get
             the main thread exit signal once all child threads have
             already exited.  If we stop all the threads and use the
             stop_wait_callback to check if they have exited we can
             determine whether this signal should be ignored or
             whether it means the end of the debugged application,
             regardless of which threading model is being used.  */
          if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
            {
              iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
                                 stop_and_resume_callback, NULL);
            }

          if (debug_linux_nat)
            fprintf_unfiltered (gdb_stdlog,
                                "LLW: %s exited.\n",
                                target_pid_to_str (lp->ptid));

          if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
            {
              /* If there is at least one more LWP, then the exit signal
                 was not the end of the debugged application and should be
                 ignored.  */
              exit_lwp (lp);
              return NULL;
            }
        }

      gdb_assert (lp->resumed);

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "Process %ld exited\n",
                            ptid_get_lwp (lp->ptid));

      /* This was the last lwp in the process.  Since events are
         serialized to GDB core, we may not be able report this one
         right now, but GDB core and the other target layers will want
         to be notified about the exit code/signal, leave the status
         pending for the next time we're able to report it.  */

      /* Dead LWPs aren't expected to report a pending sigstop.  */
      lp->signalled = 0;

      /* Store the pending event in the waitstatus, because
         W_EXITCODE(0,0) == 0.  */
      store_waitstatus (&lp->waitstatus, status);
      return lp;
    }

  /* Check if the current LWP has previously exited.  In the nptl
     thread model, LWPs other than the main thread do not issue
     signals when they exit so we must check whenever the thread has
     stopped.  A similar check is made in stop_wait_callback().  */
  if (num_lwps (ptid_get_pid (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
    {
      ptid_t ptid = pid_to_ptid (ptid_get_pid (lp->ptid));

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: %s exited.\n",
                            target_pid_to_str (lp->ptid));

      exit_lwp (lp);

      /* Make sure there is at least one thread running.  */
      gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));

      /* Discard the event.  */
      return NULL;
    }

  /* Make sure we don't report a SIGSTOP that we sent ourselves in
     an attempt to stop an LWP.  */
  if (lp->signalled
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: Delayed SIGSTOP caught for %s.\n",
                            target_pid_to_str (lp->ptid));

      lp->signalled = 0;

      if (lp->last_resume_kind != resume_stop)
        {
          /* This is a delayed SIGSTOP.  */

          linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
          if (debug_linux_nat)
            fprintf_unfiltered (gdb_stdlog,
                                "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
                                lp->step ?
                                "PTRACE_SINGLESTEP" : "PTRACE_CONT",
                                target_pid_to_str (lp->ptid));

          gdb_assert (lp->resumed);

          /* Discard the event.  */
          return NULL;
        }
    }

  /* Make sure we don't report a SIGINT that we have already displayed
     for another thread.  */
  if (lp->ignore_sigint
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: Delayed SIGINT caught for %s.\n",
                            target_pid_to_str (lp->ptid));

      /* This is a delayed SIGINT.  */
      lp->ignore_sigint = 0;

      linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: %s %s, 0, 0 (discard SIGINT)\n",
                            lp->step ?
                            "PTRACE_SINGLESTEP" : "PTRACE_CONT",
                            target_pid_to_str (lp->ptid));
      gdb_assert (lp->resumed);

      /* Discard the event.  */
      return NULL;
    }

  /* Don't report signals that GDB isn't interested in, such as
     signals that are neither printed nor stopped upon.  Stopping all
     threads can be a bit time-consuming so if we want decent
     performance with heavily multi-threaded programs, especially when
     they're using a high frequency timer, we'd better avoid it if we
     can.  */
  if (WIFSTOPPED (status))
    {
      enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));

      if (!non_stop)
        {
          /* Only do the below in all-stop, as we currently use SIGSTOP
             to implement target_stop (see linux_nat_stop) in
             non-stop.  */
          if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
            {
              /* If ^C/BREAK is typed at the tty/console, SIGINT gets
                 forwarded to the entire process group, that is, all LWPs
                 will receive it - unless they're using CLONE_THREAD to
                 share signals.  Since we only want to report it once, we
                 mark it as ignored for all LWPs except this one.  */
              iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
                                 set_ignore_sigint, NULL);
              lp->ignore_sigint = 0;
            }
          else
            maybe_clear_ignore_sigint (lp);
        }

      /* When using hardware single-step, we need to report every signal.
         Otherwise, signals in pass_mask may be short-circuited
         except signals that might be caused by a breakpoint.  */
      if (!lp->step
          && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
          && !linux_wstatus_maybe_breakpoint (status))
        {
          linux_resume_one_lwp (lp, lp->step, signo);
          if (debug_linux_nat)
            fprintf_unfiltered (gdb_stdlog,
                                "LLW: %s %s, %s (preempt 'handle')\n",
                                lp->step ?
                                "PTRACE_SINGLESTEP" : "PTRACE_CONT",
                                target_pid_to_str (lp->ptid),
                                (signo != GDB_SIGNAL_0
                                 ? strsignal (gdb_signal_to_host (signo))
                                 : "0"));
          return NULL;
        }
    }

  /* An interesting event.  */
  gdb_assert (lp);
  lp->status = status;
  save_sigtrap (lp);
  return lp;
}
3126
/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct inferior *inf;

  ALL_INFERIORS (inf)
    {
      struct lwp_info *leader_lp;

      /* Skip inferiors with no live process.  */
      if (inf->pid == 0)
        continue;

      leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
      if (leader_lp != NULL
          /* Check if there are other threads in the group, as we may
             have raced with the inferior simply exiting.  */
          && num_lwps (inf->pid) > 1
          && linux_proc_pid_is_zombie (inf->pid))
        {
          if (debug_linux_nat)
            fprintf_unfiltered (gdb_stdlog,
                                "CZL: Thread group leader %d zombie "
                                "(it exited, or another thread execd).\n",
                                inf->pid);

          /* A leader zombie can mean one of two things:

             - It exited, and there's an exit status pending
             available, or only the leader exited (not the whole
             program).  In the latter case, we can't waitpid the
             leader's exit status until all other threads are gone.

             - There are 3 or more threads in the group, and a thread
             other than the leader exec'd.  On an exec, the Linux
             kernel destroys all other threads (except the execing
             one) in the thread group, and resets the execing thread's
             tid to the tgid.  No exit notification is sent for the
             execing thread -- from the ptracer's perspective, it
             appears as though the execing thread just vanishes.
             Until we reap all other threads except the leader and the
             execing thread, the leader will be zombie, and the
             execing thread will be in `D (disc sleep)'.  As soon as
             all other threads are reaped, the execing thread changes
             its tid to the tgid, and the previous (zombie) leader
             vanishes, giving place to the "new" leader.  We could try
             distinguishing the exit and exec cases, by waiting once
             more, and seeing if something comes out, but it doesn't
             sound useful.  The previous leader _does_ go away, and
             we'll re-add the new one once we see the exec event
             (which is just the same as what would happen if the
             previous leader did exit voluntarily before some other
             thread execs).  */

          if (debug_linux_nat)
            fprintf_unfiltered (gdb_stdlog,
                                "CZL: Thread group leader %d vanished.\n",
                                inf->pid);
          exit_lwp (leader_lp);
        }
    }
}
3191
/* Core of the Linux native target_wait implementation: pull wait
   statuses out of the kernel, filter them, pick an event LWP, and
   translate its status into *OURSTATUS.  PTID selects which LWPs to
   wait for; TARGET_OPTIONS may include TARGET_WNOHANG.  Returns the
   ptid of the LWP whose event is being reported.  */

static ptid_t
linux_nat_wait_1 (struct target_ops *ops,
                  ptid_t ptid, struct target_waitstatus *ourstatus,
                  int target_options)
{
  sigset_t prev_mask;
  enum resume_kind last_resume_kind;
  struct lwp_info *lp;
  int status;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");

  /* The first time we get here after starting a new inferior, we may
     not have added it to the LWP list yet - this is the earliest
     moment at which we know its PID.  */
  if (ptid_is_pid (inferior_ptid))
    {
      /* Upgrade the main thread's ptid.  */
      thread_change_ptid (inferior_ptid,
                          ptid_build (ptid_get_pid (inferior_ptid),
                                      ptid_get_pid (inferior_ptid), 0));

      lp = add_initial_lwp (inferior_ptid);
      lp->resumed = 1;
    }

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  */
  block_child_signals (&prev_mask);

  /* First check if there is a LWP with a wait status pending.  */
  lp = iterate_over_lwps (ptid, status_callback, NULL);
  if (lp != NULL)
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: Using pending wait status %s for %s.\n",
                            status_to_str (lp->status),
                            target_pid_to_str (lp->ptid));
    }

  if (!target_is_async_p ())
    {
      /* Causes SIGINT to be passed on to the attached process.  */
      set_sigint_trap ();
    }

  /* But if we don't find a pending event, we'll have to wait.  Always
     pull all events out of the kernel.  We'll randomly select an
     event LWP out of all that have events, to prevent starvation.  */

  while (lp == NULL)
    {
      pid_t lwpid;

      /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
         quirks:

         - If the thread group leader exits while other threads in the
           thread group still exist, waitpid(TGID, ...) hangs.  That
           waitpid won't return an exit status until the other threads
           in the group are reaped.

         - When a non-leader thread execs, that thread just vanishes
           without reporting an exit (so we'd hang if we waited for it
           explicitly in that case).  The exec event is reported to
           the TGID pid.  */

      errno = 0;
      lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
      if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
        lwpid = my_waitpid (-1, &status, WNOHANG);

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LNW: waitpid(-1, ...) returned %d, %s\n",
                            lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (lwpid > 0)
        {
          if (debug_linux_nat)
            {
              fprintf_unfiltered (gdb_stdlog,
                                  "LLW: waitpid %ld received %s\n",
                                  (long) lwpid, status_to_str (status));
            }

          linux_nat_filter_event (lwpid, status);
          /* Retry until nothing comes out of waitpid.  A single
             SIGCHLD can indicate more than one child stopped.  */
          continue;
        }

      /* Now that we've pulled all events out of the kernel, resume
         LWPs that don't have an interesting event to report.  */
      iterate_over_lwps (minus_one_ptid,
                         resume_stopped_resumed_lwps, &minus_one_ptid);

      /* ... and find an LWP with a status to report to the core, if
         any.  */
      lp = iterate_over_lwps (ptid, status_callback, NULL);
      if (lp != NULL)
        break;

      /* Check for zombie thread group leaders.  Those can't be reaped
         until all other threads in the thread group are.  */
      check_zombie_leaders ();

      /* If there are no resumed children left, bail.  We'd be stuck
         forever in the sigsuspend call below otherwise.  */
      if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
        {
          if (debug_linux_nat)
            fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");

          ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;

          if (!target_is_async_p ())
            clear_sigint_trap ();

          restore_child_signals_mask (&prev_mask);
          return minus_one_ptid;
        }

      /* No interesting event to report to the core.  */

      if (target_options & TARGET_WNOHANG)
        {
          if (debug_linux_nat)
            fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");

          ourstatus->kind = TARGET_WAITKIND_IGNORE;
          restore_child_signals_mask (&prev_mask);
          return minus_one_ptid;
        }

      /* We shouldn't end up here unless we want to try again.  */
      gdb_assert (lp == NULL);

      /* Block until we get an event reported with SIGCHLD.  */
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog, "LNW: about to sigsuspend\n");
      sigsuspend (&suspend_mask);
    }

  if (!target_is_async_p ())
    clear_sigint_trap ();

  gdb_assert (lp);

  /* Consume the pending status now that we own the event.  */
  status = lp->status;
  lp->status = 0;

  if (!non_stop)
    {
      /* Now stop all other LWP's ...  */
      iterate_over_lwps (minus_one_ptid, stop_callback, NULL);

      /* ... and wait until all of them have reported back that
         they're no longer running.  */
      iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
    }

  /* If we're not waiting for a specific LWP, choose an event LWP from
     among those that have had events.  Giving equal priority to all
     LWPs that have had events helps prevent starvation.  */
  if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
    select_event_lwp (ptid, &lp, &status);

  gdb_assert (lp != NULL);

  /* Now that we've selected our final event LWP, un-adjust its PC if
     it was a software breakpoint.  */
  if (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT)
    {
      struct regcache *regcache = get_thread_regcache (lp->ptid);
      struct gdbarch *gdbarch = get_regcache_arch (regcache);
      int decr_pc = target_decr_pc_after_break (gdbarch);

      if (decr_pc != 0)
        {
          CORE_ADDR pc;

          pc = regcache_read_pc (regcache);
          regcache_write_pc (regcache, pc + decr_pc);
        }
    }

  /* We'll need this to determine whether to report a SIGSTOP as
     GDB_SIGNAL_0.  Need to take a copy because resume_clear_callback
     clears it.  */
  last_resume_kind = lp->last_resume_kind;

  if (!non_stop)
    {
      /* In all-stop, from the core's perspective, all LWPs are now
         stopped until a new resume action is sent over.  */
      iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
    }
  else
    {
      resume_clear_callback (lp, NULL);
    }

  if (linux_nat_status_is_event (status))
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: trap ptid is %s.\n",
                            target_pid_to_str (lp->ptid));
    }

  /* Prefer a previously stored extended waitstatus (fork/exec/etc.)
     over translating the raw wait status.  */
  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      *ourstatus = lp->waitstatus;
      lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
    }
  else
    store_waitstatus (ourstatus, status);

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");

  restore_child_signals_mask (&prev_mask);

  if (last_resume_kind == resume_stop
      && ourstatus->kind == TARGET_WAITKIND_STOPPED
      && WSTOPSIG (status) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with
         target_stop, and it stopped cleanly, so report as SIG0.  The
         use of SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = GDB_SIGNAL_0;
    }

  /* Record which core the LWP last ran on; meaningless once the
     process is gone.  */
  if (ourstatus->kind == TARGET_WAITKIND_EXITED
      || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
    lp->core = -1;
  else
    lp->core = linux_common_core_of_thread (lp->ptid);

  return lp->ptid;
}
3435
/* Resume LWPs that are currently stopped without any pending status
   to report, but are resumed from the core's perspective.  DATA is a
   ptid_t pointer naming the LWPs we are about to wait for; an LWP not
   matching it is left stopped if it sits on an inserted breakpoint
   (it would re-trap immediately).  */

static int
resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
{
  ptid_t *wait_ptid_p = data;

  if (lp->stopped
      && lp->resumed
      && !lwp_status_pending_p (lp))
    {
      struct regcache *regcache = get_thread_regcache (lp->ptid);
      struct gdbarch *gdbarch = get_regcache_arch (regcache);
      CORE_ADDR pc = regcache_read_pc (regcache);

      /* Don't bother if there's a breakpoint at PC that we'd hit
         immediately, and we're not waiting for this LWP.  */
      if (!ptid_match (lp->ptid, *wait_ptid_p))
        {
          if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
            return 0;
        }

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
                            target_pid_to_str (lp->ptid),
                            paddress (gdbarch, pc),
                            lp->step);

      linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
    }

  return 0;
}
3472
/* target_wait entry point for the Linux native target.  Wraps
   linux_nat_wait_1, flushing/marking the async event file and
   re-resuming uninteresting stopped-but-resumed LWPs first.  */

static ptid_t
linux_nat_wait (struct target_ops *ops,
                ptid_t ptid, struct target_waitstatus *ourstatus,
                int target_options)
{
  ptid_t event_ptid;

  if (debug_linux_nat)
    {
      char *options_string;

      options_string = target_options_to_string (target_options);
      fprintf_unfiltered (gdb_stdlog,
                          "linux_nat_wait: [%s], [%s]\n",
                          target_pid_to_str (ptid),
                          options_string);
      xfree (options_string);
    }

  /* Flush the async file first.  */
  if (target_is_async_p ())
    async_file_flush ();

  /* Resume LWPs that are currently stopped without any pending status
     to report, but are resumed from the core's perspective.  LWPs get
     in this state if we find them stopping at a time we're not
     interested in reporting the event (target_wait on a
     specific_process, for example, see linux_nat_wait_1), and
     meanwhile the event became uninteresting.  Don't bother resuming
     LWPs we're not going to wait for if they'd stop immediately.  */
  if (non_stop)
    iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);

  event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);

  /* If we requested any event, and something came out, assume there
     may be more.  If we requested a specific lwp or process, also
     assume there may be more.  */
  if (target_is_async_p ()
      && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
           && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
          || !ptid_equal (ptid, minus_one_ptid)))
    async_file_mark ();

  return event_ptid;
}
3519
/* Callback for iterate_over_lwps: forcibly kill LP, first with
   SIGKILL via tkill, then with PTRACE_KILL.  DATA is unused.  Always
   returns 0 so iteration continues over all LWPs.  */

static int
kill_callback (struct lwp_info *lp, void *data)
{
  /* PTRACE_KILL may resume the inferior.  Send SIGKILL first.  */

  errno = 0;
  kill_lwp (ptid_get_lwp (lp->ptid), SIGKILL);
  if (debug_linux_nat)
    {
      /* Save errno before the debug print can clobber it.  */
      int save_errno = errno;

      fprintf_unfiltered (gdb_stdlog,
                          "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
                          target_pid_to_str (lp->ptid),
                          save_errno ? safe_strerror (save_errno) : "OK");
    }

  /* Some kernels ignore even SIGKILL for processes under ptrace.  */

  errno = 0;
  ptrace (PTRACE_KILL, ptid_get_lwp (lp->ptid), 0, 0);
  if (debug_linux_nat)
    {
      int save_errno = errno;

      fprintf_unfiltered (gdb_stdlog,
                          "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
                          target_pid_to_str (lp->ptid),
                          save_errno ? safe_strerror (save_errno) : "OK");
    }

  return 0;
}
3553
/* Callback for iterate_over_lwps: wait until the just-killed LP is
   really gone, re-killing it if the kernel reports it again.  DATA is
   unused; always returns 0.  */

static int
kill_wait_callback (struct lwp_info *lp, void *data)
{
  pid_t pid;

  /* We must make sure that there are no pending events (delayed
     SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
     program doesn't interfere with any following debugging session.  */

  /* For cloned processes we must check both with __WCLONE and
     without, since the exit status of a cloned process isn't reported
     with __WCLONE.  */
  if (lp->cloned)
    {
      do
        {
          pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, __WCLONE);
          if (pid != (pid_t) -1)
            {
              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "KWC: wait %s received unknown.\n",
                                    target_pid_to_str (lp->ptid));
              /* The Linux kernel sometimes fails to kill a thread
                 completely after PTRACE_KILL; that goes from the stop
                 point in do_fork out to the one in
                 get_signal_to_deliver and waits again.  So kill it
                 again.  */
              kill_callback (lp, NULL);
            }
        }
      while (pid == ptid_get_lwp (lp->ptid));

      gdb_assert (pid == -1 && errno == ECHILD);
    }

  do
    {
      pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, 0);
      if (pid != (pid_t) -1)
        {
          if (debug_linux_nat)
            fprintf_unfiltered (gdb_stdlog,
                                "KWC: wait %s received unk.\n",
                                target_pid_to_str (lp->ptid));
          /* See the call to kill_callback above.  */
          kill_callback (lp, NULL);
        }
    }
  while (pid == ptid_get_lwp (lp->ptid));

  gdb_assert (pid == -1 && errno == ECHILD);
  return 0;
}
3608
/* target_kill implementation: kill the inferior process (and, if we
   are stopped at an unfollowed fork/vfork, its other task), then
   mourn it.  */

static void
linux_nat_kill (struct target_ops *ops)
{
  struct target_waitstatus last;
  ptid_t last_ptid;
  int status;

  /* If we're stopped while forking and we haven't followed yet,
     kill the other task.  We need to do this first because the
     parent will be sleeping if this is a vfork.  */

  get_last_target_status (&last_ptid, &last);

  if (last.kind == TARGET_WAITKIND_FORKED
      || last.kind == TARGET_WAITKIND_VFORKED)
    {
      ptrace (PT_KILL, ptid_get_pid (last.value.related_pid), 0, 0);
      wait (&status);

      /* Let the arch-specific native code know this process is
         gone.  */
      linux_nat_forget_process (ptid_get_pid (last.value.related_pid));
    }

  if (forks_exist_p ())
    linux_fork_killall ();
  else
    {
      ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));

      /* Stop all threads before killing them, since ptrace requires
         that the thread is stopped to successfully PTRACE_KILL.  */
      iterate_over_lwps (ptid, stop_callback, NULL);
      /* ... and wait until all of them have reported back that
         they're no longer running.  */
      iterate_over_lwps (ptid, stop_wait_callback, NULL);

      /* Kill all LWP's ...  */
      iterate_over_lwps (ptid, kill_callback, NULL);

      /* ... and wait until we've flushed all events.  */
      iterate_over_lwps (ptid, kill_wait_callback, NULL);
    }

  target_mourn_inferior ();
}
3655
3656static void
136d6dae 3657linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 3658{
26cb8b7c
PA
3659 int pid = ptid_get_pid (inferior_ptid);
3660
3661 purge_lwp_list (pid);
d6b0e80f 3662
f973ed9c 3663 if (! forks_exist_p ())
d90e17a7
PA
3664 /* Normal case, no other forks available. */
3665 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
3666 else
3667 /* Multi-fork case. The current inferior_ptid has exited, but
3668 there are other viable forks to debug. Delete the exiting
3669 one and context-switch to the first available. */
3670 linux_fork_mourn_inferior ();
26cb8b7c
PA
3671
3672 /* Let the arch-specific native code know this process is gone. */
3673 linux_nat_forget_process (pid);
d6b0e80f
AC
3674}
3675
5b009018
PA
3676/* Convert a native/host siginfo object, into/from the siginfo in the
3677 layout of the inferiors' architecture. */
3678
3679static void
a5362b9a 3680siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5b009018
PA
3681{
3682 int done = 0;
3683
3684 if (linux_nat_siginfo_fixup != NULL)
3685 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3686
3687 /* If there was no callback, or the callback didn't do anything,
3688 then just do a straight memcpy. */
3689 if (!done)
3690 {
3691 if (direction == 1)
a5362b9a 3692 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5b009018 3693 else
a5362b9a 3694 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5b009018
PA
3695 }
3696}
3697
/* Transfer TARGET_OBJECT_SIGNAL_INFO: read or write LEN bytes of the
   current LWP's siginfo starting at OFFSET, via
   PTRACE_GETSIGINFO/PTRACE_SETSIGINFO.  Exactly one of READBUF /
   WRITEBUF is non-NULL.  On success stores the transferred byte count
   in *XFERED_LEN and returns TARGET_XFER_OK; returns TARGET_XFER_E_IO
   on bad offsets or ptrace failure.  */

static enum target_xfer_status
linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
                    const char *annex, gdb_byte *readbuf,
                    const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
                    ULONGEST *xfered_len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
  gdb_assert (readbuf || writebuf);

  /* Prefer the LWP id; fall back to the process id when no LWP id is
     recorded in inferior_ptid.  */
  pid = ptid_get_lwp (inferior_ptid);
  if (pid == 0)
    pid = ptid_get_pid (inferior_ptid);

  if (offset > sizeof (siginfo))
    return TARGET_XFER_E_IO;

  errno = 0;
  ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
  if (errno != 0)
    return TARGET_XFER_E_IO;

  /* When GDB is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDB should look the same as debugging it
     with a 32-bit GDB, we need to convert it.  GDB core always sees
     the converted layout, so any read/write will have to be done
     post-conversion.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the siginfo object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      errno = 0;
      ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
      if (errno != 0)
        return TARGET_XFER_E_IO;
    }

  *xfered_len = len;
  return TARGET_XFER_OK;
}
3752
9b409511 3753static enum target_xfer_status
10d6c8cd
DJ
3754linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3755 const char *annex, gdb_byte *readbuf,
3756 const gdb_byte *writebuf,
9b409511 3757 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
d6b0e80f 3758{
4aa995e1 3759 struct cleanup *old_chain;
9b409511 3760 enum target_xfer_status xfer;
d6b0e80f 3761
4aa995e1
PA
3762 if (object == TARGET_OBJECT_SIGNAL_INFO)
3763 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
9b409511 3764 offset, len, xfered_len);
4aa995e1 3765
c35b1492
PA
3766 /* The target is connected but no live inferior is selected. Pass
3767 this request down to a lower stratum (e.g., the executable
3768 file). */
3769 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
9b409511 3770 return TARGET_XFER_EOF;
c35b1492 3771
4aa995e1
PA
3772 old_chain = save_inferior_ptid ();
3773
dfd4cc63
LM
3774 if (ptid_lwp_p (inferior_ptid))
3775 inferior_ptid = pid_to_ptid (ptid_get_lwp (inferior_ptid));
d6b0e80f 3776
10d6c8cd 3777 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511 3778 offset, len, xfered_len);
d6b0e80f
AC
3779
3780 do_cleanups (old_chain);
3781 return xfer;
3782}
3783
3784static int
28439f5e 3785linux_thread_alive (ptid_t ptid)
d6b0e80f 3786{
8c6a60d1 3787 int err, tmp_errno;
4c28f408 3788
dfd4cc63 3789 gdb_assert (ptid_lwp_p (ptid));
d6b0e80f 3790
4c28f408
PA
3791 /* Send signal 0 instead of anything ptrace, because ptracing a
3792 running thread errors out claiming that the thread doesn't
3793 exist. */
dfd4cc63 3794 err = kill_lwp (ptid_get_lwp (ptid), 0);
8c6a60d1 3795 tmp_errno = errno;
d6b0e80f
AC
3796 if (debug_linux_nat)
3797 fprintf_unfiltered (gdb_stdlog,
4c28f408 3798 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 3799 target_pid_to_str (ptid),
8c6a60d1 3800 err ? safe_strerror (tmp_errno) : "OK");
9c0dd46b 3801
4c28f408 3802 if (err != 0)
d6b0e80f
AC
3803 return 0;
3804
3805 return 1;
3806}
3807
28439f5e
PA
3808static int
3809linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
3810{
3811 return linux_thread_alive (ptid);
3812}
3813
d6b0e80f 3814static char *
117de6a9 3815linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
3816{
3817 static char buf[64];
3818
dfd4cc63
LM
3819 if (ptid_lwp_p (ptid)
3820 && (ptid_get_pid (ptid) != ptid_get_lwp (ptid)
3821 || num_lwps (ptid_get_pid (ptid)) > 1))
d6b0e80f 3822 {
dfd4cc63 3823 snprintf (buf, sizeof (buf), "LWP %ld", ptid_get_lwp (ptid));
d6b0e80f
AC
3824 return buf;
3825 }
3826
3827 return normal_pid_to_str (ptid);
3828}
3829
4694da01 3830static char *
503a628d 3831linux_nat_thread_name (struct target_ops *self, struct thread_info *thr)
4694da01
TT
3832{
3833 int pid = ptid_get_pid (thr->ptid);
3834 long lwp = ptid_get_lwp (thr->ptid);
3835#define FORMAT "/proc/%d/task/%ld/comm"
3836 char buf[sizeof (FORMAT) + 30];
3837 FILE *comm_file;
3838 char *result = NULL;
3839
3840 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
614c279d 3841 comm_file = gdb_fopen_cloexec (buf, "r");
4694da01
TT
3842 if (comm_file)
3843 {
3844 /* Not exported by the kernel, so we define it here. */
3845#define COMM_LEN 16
3846 static char line[COMM_LEN + 1];
3847
3848 if (fgets (line, sizeof (line), comm_file))
3849 {
3850 char *nl = strchr (line, '\n');
3851
3852 if (nl)
3853 *nl = '\0';
3854 if (*line != '\0')
3855 result = line;
3856 }
3857
3858 fclose (comm_file);
3859 }
3860
3861#undef COMM_LEN
3862#undef FORMAT
3863
3864 return result;
3865}
3866
dba24537
AC
3867/* Accepts an integer PID; Returns a string representing a file that
3868 can be opened to get the symbols for the child process. */
3869
6d8fd2b7 3870static char *
8dd27370 3871linux_child_pid_to_exec_file (struct target_ops *self, int pid)
dba24537 3872{
b4ab256d
HZ
3873 static char buf[PATH_MAX];
3874 char name[PATH_MAX];
dba24537 3875
b4ab256d
HZ
3876 xsnprintf (name, PATH_MAX, "/proc/%d/exe", pid);
3877 memset (buf, 0, PATH_MAX);
3878 if (readlink (name, buf, PATH_MAX - 1) <= 0)
3879 strcpy (buf, name);
dba24537 3880
b4ab256d 3881 return buf;
dba24537
AC
3882}
3883
10d6c8cd
DJ
3884/* Implement the to_xfer_partial interface for memory reads using the /proc
3885 filesystem. Because we can use a single read() call for /proc, this
3886 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3887 but it doesn't support writes. */
3888
9b409511 3889static enum target_xfer_status
10d6c8cd
DJ
3890linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3891 const char *annex, gdb_byte *readbuf,
3892 const gdb_byte *writebuf,
9b409511 3893 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
dba24537 3894{
10d6c8cd
DJ
3895 LONGEST ret;
3896 int fd;
dba24537
AC
3897 char filename[64];
3898
10d6c8cd 3899 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
3900 return 0;
3901
3902 /* Don't bother for one word. */
3903 if (len < 3 * sizeof (long))
9b409511 3904 return TARGET_XFER_EOF;
dba24537
AC
3905
3906 /* We could keep this file open and cache it - possibly one per
3907 thread. That requires some juggling, but is even faster. */
cde33bf1
YQ
3908 xsnprintf (filename, sizeof filename, "/proc/%d/mem",
3909 ptid_get_pid (inferior_ptid));
614c279d 3910 fd = gdb_open_cloexec (filename, O_RDONLY | O_LARGEFILE, 0);
dba24537 3911 if (fd == -1)
9b409511 3912 return TARGET_XFER_EOF;
dba24537
AC
3913
3914 /* If pread64 is available, use it. It's faster if the kernel
3915 supports it (only one syscall), and it's 64-bit safe even on
3916 32-bit platforms (for instance, SPARC debugging a SPARC64
3917 application). */
3918#ifdef HAVE_PREAD64
10d6c8cd 3919 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 3920#else
10d6c8cd 3921 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
3922#endif
3923 ret = 0;
3924 else
3925 ret = len;
3926
3927 close (fd);
9b409511
YQ
3928
3929 if (ret == 0)
3930 return TARGET_XFER_EOF;
3931 else
3932 {
3933 *xfered_len = ret;
3934 return TARGET_XFER_OK;
3935 }
dba24537
AC
3936}
3937
efcbbd14
UW
3938
3939/* Enumerate spufs IDs for process PID. */
3940static LONGEST
b55e14c7 3941spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, ULONGEST len)
efcbbd14 3942{
f5656ead 3943 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
efcbbd14
UW
3944 LONGEST pos = 0;
3945 LONGEST written = 0;
3946 char path[128];
3947 DIR *dir;
3948 struct dirent *entry;
3949
3950 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
3951 dir = opendir (path);
3952 if (!dir)
3953 return -1;
3954
3955 rewinddir (dir);
3956 while ((entry = readdir (dir)) != NULL)
3957 {
3958 struct stat st;
3959 struct statfs stfs;
3960 int fd;
3961
3962 fd = atoi (entry->d_name);
3963 if (!fd)
3964 continue;
3965
3966 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
3967 if (stat (path, &st) != 0)
3968 continue;
3969 if (!S_ISDIR (st.st_mode))
3970 continue;
3971
3972 if (statfs (path, &stfs) != 0)
3973 continue;
3974 if (stfs.f_type != SPUFS_MAGIC)
3975 continue;
3976
3977 if (pos >= offset && pos + 4 <= offset + len)
3978 {
3979 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
3980 written += 4;
3981 }
3982 pos += 4;
3983 }
3984
3985 closedir (dir);
3986 return written;
3987}
3988
3989/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
3990 object type, using the /proc file system. */
9b409511
YQ
3991
3992static enum target_xfer_status
efcbbd14
UW
3993linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
3994 const char *annex, gdb_byte *readbuf,
3995 const gdb_byte *writebuf,
9b409511 3996 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
efcbbd14
UW
3997{
3998 char buf[128];
3999 int fd = 0;
4000 int ret = -1;
dfd4cc63 4001 int pid = ptid_get_pid (inferior_ptid);
efcbbd14
UW
4002
4003 if (!annex)
4004 {
4005 if (!readbuf)
2ed4b548 4006 return TARGET_XFER_E_IO;
efcbbd14 4007 else
9b409511
YQ
4008 {
4009 LONGEST l = spu_enumerate_spu_ids (pid, readbuf, offset, len);
4010
4011 if (l < 0)
4012 return TARGET_XFER_E_IO;
4013 else if (l == 0)
4014 return TARGET_XFER_EOF;
4015 else
4016 {
4017 *xfered_len = (ULONGEST) l;
4018 return TARGET_XFER_OK;
4019 }
4020 }
efcbbd14
UW
4021 }
4022
4023 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
614c279d 4024 fd = gdb_open_cloexec (buf, writebuf? O_WRONLY : O_RDONLY, 0);
efcbbd14 4025 if (fd <= 0)
2ed4b548 4026 return TARGET_XFER_E_IO;
efcbbd14
UW
4027
4028 if (offset != 0
4029 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4030 {
4031 close (fd);
9b409511 4032 return TARGET_XFER_EOF;
efcbbd14
UW
4033 }
4034
4035 if (writebuf)
4036 ret = write (fd, writebuf, (size_t) len);
4037 else if (readbuf)
4038 ret = read (fd, readbuf, (size_t) len);
4039
4040 close (fd);
9b409511
YQ
4041
4042 if (ret < 0)
4043 return TARGET_XFER_E_IO;
4044 else if (ret == 0)
4045 return TARGET_XFER_EOF;
4046 else
4047 {
4048 *xfered_len = (ULONGEST) ret;
4049 return TARGET_XFER_OK;
4050 }
efcbbd14
UW
4051}
4052
4053
dba24537
AC
4054/* Parse LINE as a signal set and add its set bits to SIGS. */
4055
4056static void
4057add_line_to_sigset (const char *line, sigset_t *sigs)
4058{
4059 int len = strlen (line) - 1;
4060 const char *p;
4061 int signum;
4062
4063 if (line[len] != '\n')
8a3fe4f8 4064 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4065
4066 p = line;
4067 signum = len * 4;
4068 while (len-- > 0)
4069 {
4070 int digit;
4071
4072 if (*p >= '0' && *p <= '9')
4073 digit = *p - '0';
4074 else if (*p >= 'a' && *p <= 'f')
4075 digit = *p - 'a' + 10;
4076 else
8a3fe4f8 4077 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4078
4079 signum -= 4;
4080
4081 if (digit & 1)
4082 sigaddset (sigs, signum + 1);
4083 if (digit & 2)
4084 sigaddset (sigs, signum + 2);
4085 if (digit & 4)
4086 sigaddset (sigs, signum + 3);
4087 if (digit & 8)
4088 sigaddset (sigs, signum + 4);
4089
4090 p++;
4091 }
4092}
4093
4094/* Find process PID's pending signals from /proc/pid/status and set
4095 SIGS to match. */
4096
4097void
3e43a32a
MS
4098linux_proc_pending_signals (int pid, sigset_t *pending,
4099 sigset_t *blocked, sigset_t *ignored)
dba24537
AC
4100{
4101 FILE *procfile;
d8d2a3ee 4102 char buffer[PATH_MAX], fname[PATH_MAX];
7c8a8b04 4103 struct cleanup *cleanup;
dba24537
AC
4104
4105 sigemptyset (pending);
4106 sigemptyset (blocked);
4107 sigemptyset (ignored);
cde33bf1 4108 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
614c279d 4109 procfile = gdb_fopen_cloexec (fname, "r");
dba24537 4110 if (procfile == NULL)
8a3fe4f8 4111 error (_("Could not open %s"), fname);
7c8a8b04 4112 cleanup = make_cleanup_fclose (procfile);
dba24537 4113
d8d2a3ee 4114 while (fgets (buffer, PATH_MAX, procfile) != NULL)
dba24537
AC
4115 {
4116 /* Normal queued signals are on the SigPnd line in the status
4117 file. However, 2.6 kernels also have a "shared" pending
4118 queue for delivering signals to a thread group, so check for
4119 a ShdPnd line also.
4120
4121 Unfortunately some Red Hat kernels include the shared pending
4122 queue but not the ShdPnd status field. */
4123
4124 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4125 add_line_to_sigset (buffer + 8, pending);
4126 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4127 add_line_to_sigset (buffer + 8, pending);
4128 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4129 add_line_to_sigset (buffer + 8, blocked);
4130 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4131 add_line_to_sigset (buffer + 8, ignored);
4132 }
4133
7c8a8b04 4134 do_cleanups (cleanup);
dba24537
AC
4135}
4136
9b409511 4137static enum target_xfer_status
07e059b5 4138linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
e0881a8e 4139 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4140 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4141 ULONGEST *xfered_len)
07e059b5 4142{
07e059b5
VP
4143 gdb_assert (object == TARGET_OBJECT_OSDATA);
4144
9b409511
YQ
4145 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4146 if (*xfered_len == 0)
4147 return TARGET_XFER_EOF;
4148 else
4149 return TARGET_XFER_OK;
07e059b5
VP
4150}
4151
9b409511 4152static enum target_xfer_status
10d6c8cd
DJ
4153linux_xfer_partial (struct target_ops *ops, enum target_object object,
4154 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4155 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4156 ULONGEST *xfered_len)
10d6c8cd 4157{
9b409511 4158 enum target_xfer_status xfer;
10d6c8cd
DJ
4159
4160 if (object == TARGET_OBJECT_AUXV)
9f2982ff 4161 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
9b409511 4162 offset, len, xfered_len);
10d6c8cd 4163
07e059b5
VP
4164 if (object == TARGET_OBJECT_OSDATA)
4165 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
9b409511 4166 offset, len, xfered_len);
07e059b5 4167
efcbbd14
UW
4168 if (object == TARGET_OBJECT_SPU)
4169 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
9b409511 4170 offset, len, xfered_len);
efcbbd14 4171
8f313923
JK
4172 /* GDB calculates all the addresses in possibly larget width of the address.
4173 Address width needs to be masked before its final use - either by
4174 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4175
4176 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4177
4178 if (object == TARGET_OBJECT_MEMORY)
4179 {
f5656ead 4180 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
8f313923
JK
4181
4182 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4183 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4184 }
4185
10d6c8cd 4186 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511
YQ
4187 offset, len, xfered_len);
4188 if (xfer != TARGET_XFER_EOF)
10d6c8cd
DJ
4189 return xfer;
4190
4191 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511 4192 offset, len, xfered_len);
10d6c8cd
DJ
4193}
4194
5808517f
YQ
4195static void
4196cleanup_target_stop (void *arg)
4197{
4198 ptid_t *ptid = (ptid_t *) arg;
4199
4200 gdb_assert (arg != NULL);
4201
4202 /* Unpause all */
a493e3e2 4203 target_resume (*ptid, 0, GDB_SIGNAL_0);
5808517f
YQ
4204}
4205
4206static VEC(static_tracepoint_marker_p) *
c686c57f
TT
4207linux_child_static_tracepoint_markers_by_strid (struct target_ops *self,
4208 const char *strid)
5808517f
YQ
4209{
4210 char s[IPA_CMD_BUF_SIZE];
4211 struct cleanup *old_chain;
4212 int pid = ptid_get_pid (inferior_ptid);
4213 VEC(static_tracepoint_marker_p) *markers = NULL;
4214 struct static_tracepoint_marker *marker = NULL;
4215 char *p = s;
4216 ptid_t ptid = ptid_build (pid, 0, 0);
4217
4218 /* Pause all */
4219 target_stop (ptid);
4220
4221 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4222 s[sizeof ("qTfSTM")] = 0;
4223
42476b70 4224 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4225
4226 old_chain = make_cleanup (free_current_marker, &marker);
4227 make_cleanup (cleanup_target_stop, &ptid);
4228
4229 while (*p++ == 'm')
4230 {
4231 if (marker == NULL)
4232 marker = XCNEW (struct static_tracepoint_marker);
4233
4234 do
4235 {
4236 parse_static_tracepoint_marker_definition (p, &p, marker);
4237
4238 if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4239 {
4240 VEC_safe_push (static_tracepoint_marker_p,
4241 markers, marker);
4242 marker = NULL;
4243 }
4244 else
4245 {
4246 release_static_tracepoint_marker (marker);
4247 memset (marker, 0, sizeof (*marker));
4248 }
4249 }
4250 while (*p++ == ','); /* comma-separated list */
4251
4252 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4253 s[sizeof ("qTsSTM")] = 0;
42476b70 4254 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4255 p = s;
4256 }
4257
4258 do_cleanups (old_chain);
4259
4260 return markers;
4261}
4262
e9efe249 4263/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
4264 it with local methods. */
4265
910122bf
UW
4266static void
4267linux_target_install_ops (struct target_ops *t)
10d6c8cd 4268{
6d8fd2b7 4269 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
eb73ad13 4270 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
6d8fd2b7 4271 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
eb73ad13 4272 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
6d8fd2b7 4273 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
eb73ad13 4274 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
a96d9b2e 4275 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
6d8fd2b7 4276 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 4277 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
4278 t->to_post_attach = linux_child_post_attach;
4279 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
4280
4281 super_xfer_partial = t->to_xfer_partial;
4282 t->to_xfer_partial = linux_xfer_partial;
5808517f
YQ
4283
4284 t->to_static_tracepoint_markers_by_strid
4285 = linux_child_static_tracepoint_markers_by_strid;
910122bf
UW
4286}
4287
/* Return a fresh inf-ptrace based target with the generic GNU/Linux
   methods installed.  */

struct target_ops *
linux_target (void)
{
  struct target_ops *t = inf_ptrace_target ();

  linux_target_install_ops (t);
  return t;
}
4298
4299struct target_ops *
7714d83a 4300linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
4301{
4302 struct target_ops *t;
4303
4304 t = inf_ptrace_trad_target (register_u_offset);
4305 linux_target_install_ops (t);
10d6c8cd 4306
10d6c8cd
DJ
4307 return t;
4308}
4309
/* target_is_async_p implementation.  */

static int
linux_nat_is_async_p (struct target_ops *ops)
{
  return linux_is_async_p ();
}
4317
4318/* target_can_async_p implementation. */
4319
4320static int
6a109b6b 4321linux_nat_can_async_p (struct target_ops *ops)
b84876c2
PA
4322{
4323 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 4324 it explicitly with the "set target-async" command.
b84876c2 4325 Someday, linux will always be async. */
3dd5b83d 4326 return target_async_permitted;
b84876c2
PA
4327}
4328
/* This native target always supports non-stop mode.  */

static int
linux_nat_supports_non_stop (struct target_ops *self)
{
  return 1;
}
4334
/* True if we want to support multi-process.  To be removed when GDB
   supports multi-exec.  */

int linux_multi_process = 1;

static int
linux_nat_supports_multi_process (struct target_ops *self)
{
  return linux_multi_process;
}
4345
/* Address-space randomization can be disabled only when the host
   supports the personality syscall.  */

static int
linux_nat_supports_disable_randomization (struct target_ops *self)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
4355
b84876c2
PA
4356static int async_terminal_is_ours = 1;
4357
4d4ca2a1
DE
4358/* target_terminal_inferior implementation.
4359
4360 This is a wrapper around child_terminal_inferior to add async support. */
b84876c2
PA
4361
4362static void
d2f640d4 4363linux_nat_terminal_inferior (struct target_ops *self)
b84876c2 4364{
198297aa
PA
4365 /* Like target_terminal_inferior, use target_can_async_p, not
4366 target_is_async_p, since at this point the target is not async
4367 yet. If it can async, then we know it will become async prior to
4368 resume. */
4369 if (!target_can_async_p ())
b84876c2
PA
4370 {
4371 /* Async mode is disabled. */
d6b64346 4372 child_terminal_inferior (self);
b84876c2
PA
4373 return;
4374 }
4375
d6b64346 4376 child_terminal_inferior (self);
b84876c2 4377
d9d2d8b6 4378 /* Calls to target_terminal_*() are meant to be idempotent. */
b84876c2
PA
4379 if (!async_terminal_is_ours)
4380 return;
4381
4382 delete_file_handler (input_fd);
4383 async_terminal_is_ours = 0;
4384 set_sigint_trap ();
4385}
4386
4d4ca2a1
DE
4387/* target_terminal_ours implementation.
4388
4389 This is a wrapper around child_terminal_ours to add async support (and
4390 implement the target_terminal_ours vs target_terminal_ours_for_output
4391 distinction). child_terminal_ours is currently no different than
4392 child_terminal_ours_for_output.
4393 We leave target_terminal_ours_for_output alone, leaving it to
4394 child_terminal_ours_for_output. */
b84876c2 4395
2c0b251b 4396static void
e3594fd1 4397linux_nat_terminal_ours (struct target_ops *self)
b84876c2 4398{
b84876c2
PA
4399 /* GDB should never give the terminal to the inferior if the
4400 inferior is running in the background (run&, continue&, etc.),
4401 but claiming it sure should. */
d6b64346 4402 child_terminal_ours (self);
b84876c2 4403
b84876c2
PA
4404 if (async_terminal_is_ours)
4405 return;
4406
4407 clear_sigint_trap ();
4408 add_file_handler (input_fd, stdin_event_handler, 0);
4409 async_terminal_is_ours = 1;
4410}
4411
4412static void (*async_client_callback) (enum inferior_event_type event_type,
4413 void *context);
4414static void *async_client_context;
4415
7feb7d06
PA
4416/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4417 so we notice when any child changes state, and notify the
4418 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4419 above to wait for the arrival of a SIGCHLD. */
4420
b84876c2 4421static void
7feb7d06 4422sigchld_handler (int signo)
b84876c2 4423{
7feb7d06
PA
4424 int old_errno = errno;
4425
01124a23
DE
4426 if (debug_linux_nat)
4427 ui_file_write_async_safe (gdb_stdlog,
4428 "sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06
PA
4429
4430 if (signo == SIGCHLD
4431 && linux_nat_event_pipe[0] != -1)
4432 async_file_mark (); /* Let the event loop know that there are
4433 events to handle. */
4434
4435 errno = old_errno;
4436}
4437
4438/* Callback registered with the target events file descriptor. */
4439
4440static void
4441handle_target_event (int error, gdb_client_data client_data)
4442{
4443 (*async_client_callback) (INF_REG_EVENT, async_client_context);
4444}
4445
4446/* Create/destroy the target events pipe. Returns previous state. */
4447
4448static int
4449linux_async_pipe (int enable)
4450{
198297aa 4451 int previous = linux_is_async_p ();
7feb7d06
PA
4452
4453 if (previous != enable)
4454 {
4455 sigset_t prev_mask;
4456
12696c10
PA
4457 /* Block child signals while we create/destroy the pipe, as
4458 their handler writes to it. */
7feb7d06
PA
4459 block_child_signals (&prev_mask);
4460
4461 if (enable)
4462 {
614c279d 4463 if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
7feb7d06
PA
4464 internal_error (__FILE__, __LINE__,
4465 "creating event pipe failed.");
4466
4467 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4468 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4469 }
4470 else
4471 {
4472 close (linux_nat_event_pipe[0]);
4473 close (linux_nat_event_pipe[1]);
4474 linux_nat_event_pipe[0] = -1;
4475 linux_nat_event_pipe[1] = -1;
4476 }
4477
4478 restore_child_signals_mask (&prev_mask);
4479 }
4480
4481 return previous;
b84876c2
PA
4482}
4483
4484/* target_async implementation. */
4485
4486static void
6a109b6b
TT
4487linux_nat_async (struct target_ops *ops,
4488 void (*callback) (enum inferior_event_type event_type,
4489 void *context),
4490 void *context)
b84876c2 4491{
b84876c2
PA
4492 if (callback != NULL)
4493 {
4494 async_client_callback = callback;
4495 async_client_context = context;
7feb7d06
PA
4496 if (!linux_async_pipe (1))
4497 {
4498 add_file_handler (linux_nat_event_pipe[0],
4499 handle_target_event, NULL);
4500 /* There may be pending events to handle. Tell the event loop
4501 to poll them. */
4502 async_file_mark ();
4503 }
b84876c2
PA
4504 }
4505 else
4506 {
4507 async_client_callback = callback;
4508 async_client_context = context;
b84876c2 4509 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 4510 linux_async_pipe (0);
b84876c2
PA
4511 }
4512 return;
4513}
4514
a493e3e2 4515/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
252fbfc8
PA
4516 event came out. */
4517
4c28f408 4518static int
252fbfc8 4519linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4c28f408 4520{
d90e17a7 4521 if (!lwp->stopped)
252fbfc8 4522 {
d90e17a7
PA
4523 if (debug_linux_nat)
4524 fprintf_unfiltered (gdb_stdlog,
4525 "LNSL: running -> suspending %s\n",
4526 target_pid_to_str (lwp->ptid));
252fbfc8 4527
252fbfc8 4528
25289eb2
PA
4529 if (lwp->last_resume_kind == resume_stop)
4530 {
4531 if (debug_linux_nat)
4532 fprintf_unfiltered (gdb_stdlog,
4533 "linux-nat: already stopping LWP %ld at "
4534 "GDB's request\n",
4535 ptid_get_lwp (lwp->ptid));
4536 return 0;
4537 }
252fbfc8 4538
25289eb2
PA
4539 stop_callback (lwp, NULL);
4540 lwp->last_resume_kind = resume_stop;
d90e17a7
PA
4541 }
4542 else
4543 {
4544 /* Already known to be stopped; do nothing. */
252fbfc8 4545
d90e17a7
PA
4546 if (debug_linux_nat)
4547 {
e09875d4 4548 if (find_thread_ptid (lwp->ptid)->stop_requested)
3e43a32a
MS
4549 fprintf_unfiltered (gdb_stdlog,
4550 "LNSL: already stopped/stop_requested %s\n",
d90e17a7
PA
4551 target_pid_to_str (lwp->ptid));
4552 else
3e43a32a
MS
4553 fprintf_unfiltered (gdb_stdlog,
4554 "LNSL: already stopped/no "
4555 "stop_requested yet %s\n",
d90e17a7 4556 target_pid_to_str (lwp->ptid));
252fbfc8
PA
4557 }
4558 }
4c28f408
PA
4559 return 0;
4560}
4561
4562static void
1eab8a48 4563linux_nat_stop (struct target_ops *self, ptid_t ptid)
4c28f408
PA
4564{
4565 if (non_stop)
d90e17a7 4566 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4c28f408 4567 else
1eab8a48 4568 linux_ops->to_stop (linux_ops, ptid);
4c28f408
PA
4569}
4570
d90e17a7 4571static void
de90e03d 4572linux_nat_close (struct target_ops *self)
d90e17a7
PA
4573{
4574 /* Unregister from the event loop. */
9debeba0
DE
4575 if (linux_nat_is_async_p (self))
4576 linux_nat_async (self, NULL, NULL);
d90e17a7 4577
d90e17a7 4578 if (linux_ops->to_close)
de90e03d 4579 linux_ops->to_close (linux_ops);
6a3cb8e8
PA
4580
4581 super_close (self);
d90e17a7
PA
4582}
4583
c0694254
PA
4584/* When requests are passed down from the linux-nat layer to the
4585 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4586 used. The address space pointer is stored in the inferior object,
4587 but the common code that is passed such ptid can't tell whether
4588 lwpid is a "main" process id or not (it assumes so). We reverse
4589 look up the "main" process id from the lwp here. */
4590
70221824 4591static struct address_space *
c0694254
PA
4592linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
4593{
4594 struct lwp_info *lwp;
4595 struct inferior *inf;
4596 int pid;
4597
dfd4cc63 4598 if (ptid_get_lwp (ptid) == 0)
c0694254
PA
4599 {
4600 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4601 tgid. */
4602 lwp = find_lwp_pid (ptid);
dfd4cc63 4603 pid = ptid_get_pid (lwp->ptid);
c0694254
PA
4604 }
4605 else
4606 {
4607 /* A (pid,lwpid,0) ptid. */
dfd4cc63 4608 pid = ptid_get_pid (ptid);
c0694254
PA
4609 }
4610
4611 inf = find_inferior_pid (pid);
4612 gdb_assert (inf != NULL);
4613 return inf->aspace;
4614}
4615
dc146f7c
VP
4616/* Return the cached value of the processor core for thread PTID. */
4617
70221824 4618static int
dc146f7c
VP
4619linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
4620{
4621 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 4622
dc146f7c
VP
4623 if (info)
4624 return info->core;
4625 return -1;
4626}
4627
f973ed9c
DJ
4628void
4629linux_nat_add_target (struct target_ops *t)
4630{
f973ed9c
DJ
4631 /* Save the provided single-threaded target. We save this in a separate
4632 variable because another target we've inherited from (e.g. inf-ptrace)
4633 may have saved a pointer to T; we want to use it for the final
4634 process stratum target. */
4635 linux_ops_saved = *t;
4636 linux_ops = &linux_ops_saved;
4637
4638 /* Override some methods for multithreading. */
b84876c2 4639 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
4640 t->to_attach = linux_nat_attach;
4641 t->to_detach = linux_nat_detach;
4642 t->to_resume = linux_nat_resume;
4643 t->to_wait = linux_nat_wait;
2455069d 4644 t->to_pass_signals = linux_nat_pass_signals;
f973ed9c
DJ
4645 t->to_xfer_partial = linux_nat_xfer_partial;
4646 t->to_kill = linux_nat_kill;
4647 t->to_mourn_inferior = linux_nat_mourn_inferior;
4648 t->to_thread_alive = linux_nat_thread_alive;
4649 t->to_pid_to_str = linux_nat_pid_to_str;
4694da01 4650 t->to_thread_name = linux_nat_thread_name;
f973ed9c 4651 t->to_has_thread_control = tc_schedlock;
c0694254 4652 t->to_thread_address_space = linux_nat_thread_address_space;
ebec9a0f
PA
4653 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
4654 t->to_stopped_data_address = linux_nat_stopped_data_address;
f973ed9c 4655
b84876c2
PA
4656 t->to_can_async_p = linux_nat_can_async_p;
4657 t->to_is_async_p = linux_nat_is_async_p;
9908b566 4658 t->to_supports_non_stop = linux_nat_supports_non_stop;
b84876c2 4659 t->to_async = linux_nat_async;
b84876c2
PA
4660 t->to_terminal_inferior = linux_nat_terminal_inferior;
4661 t->to_terminal_ours = linux_nat_terminal_ours;
6a3cb8e8
PA
4662
4663 super_close = t->to_close;
d90e17a7 4664 t->to_close = linux_nat_close;
b84876c2 4665
4c28f408
PA
4666 /* Methods for non-stop support. */
4667 t->to_stop = linux_nat_stop;
4668
d90e17a7
PA
4669 t->to_supports_multi_process = linux_nat_supports_multi_process;
4670
03583c20
UW
4671 t->to_supports_disable_randomization
4672 = linux_nat_supports_disable_randomization;
4673
dc146f7c
VP
4674 t->to_core_of_thread = linux_nat_core_of_thread;
4675
f973ed9c
DJ
4676 /* We don't change the stratum; this target will sit at
4677 process_stratum and thread_db will set at thread_stratum. This
4678 is a little strange, since this is a multi-threaded-capable
4679 target, but we want to be on the stack below thread_db, and we
4680 also want to be used for single-threaded processes. */
4681
4682 add_target (t);
f973ed9c
DJ
4683}
4684
9f0bdab8
DJ
4685/* Register a method to call whenever a new thread is attached. */
4686void
7b50312a
PA
4687linux_nat_set_new_thread (struct target_ops *t,
4688 void (*new_thread) (struct lwp_info *))
9f0bdab8
DJ
4689{
4690 /* Save the pointer. We only support a single registered instance
4691 of the GNU/Linux native target, so we do not need to map this to
4692 T. */
4693 linux_nat_new_thread = new_thread;
4694}
4695
26cb8b7c
PA
4696/* See declaration in linux-nat.h. */
4697
4698void
4699linux_nat_set_new_fork (struct target_ops *t,
4700 linux_nat_new_fork_ftype *new_fork)
4701{
4702 /* Save the pointer. */
4703 linux_nat_new_fork = new_fork;
4704}
4705
/* See declaration in linux-nat.h.  */

/* Register the callback invoked by linux_nat_forget_process when a
   process's state should be discarded.  */

void
linux_nat_set_forget_process (struct target_ops *t,
			      linux_nat_forget_process_ftype *fn)
{
  /* Save the pointer.  T is unused, as with the other set_* hooks.  */
  linux_nat_forget_process_hook = fn;
}
4715
4716/* See declaration in linux-nat.h. */
4717
4718void
4719linux_nat_forget_process (pid_t pid)
4720{
4721 if (linux_nat_forget_process_hook != NULL)
4722 linux_nat_forget_process_hook (pid);
4723}
4724
/* Register a method that converts a siginfo object between the layout
   that ptrace returns, and the layout in the architecture of the
   inferior.  The callback receives the siginfo, a scratch buffer, and
   a direction flag; see the call sites for the exact contract.  */
void
linux_nat_set_siginfo_fixup (struct target_ops *t,
			     int (*siginfo_fixup) (siginfo_t *,
						   gdb_byte *,
						   int))
{
  /* Save the pointer.  T is unused, as with the other set_* hooks.  */
  linux_nat_siginfo_fixup = siginfo_fixup;
}
4737
/* Register a method to call prior to resuming a thread.  The callback
   receives the lwp_info of the LWP about to be resumed.  */

void
linux_nat_set_prepare_to_resume (struct target_ops *t,
				 void (*prepare_to_resume) (struct lwp_info *))
{
  /* Save the pointer.  T is unused, as with the other set_* hooks.  */
  linux_nat_prepare_to_resume = prepare_to_resume;
}
4747
f865ee35
JK
4748/* See linux-nat.h. */
4749
4750int
4751linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
9f0bdab8 4752{
da559b09 4753 int pid;
9f0bdab8 4754
dfd4cc63 4755 pid = ptid_get_lwp (ptid);
da559b09 4756 if (pid == 0)
dfd4cc63 4757 pid = ptid_get_pid (ptid);
f865ee35 4758
da559b09
JK
4759 errno = 0;
4760 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
4761 if (errno != 0)
4762 {
4763 memset (siginfo, 0, sizeof (*siginfo));
4764 return 0;
4765 }
f865ee35 4766 return 1;
9f0bdab8
DJ
4767}
4768
/* Provide a prototype to silence -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_linux_nat;

/* Module initializer: registers the "set/show debug lin-lwp"
   maintenance command, snapshots the default signal masks, installs
   the SIGCHLD handler, and selects the ptrace options this module
   will request.  */

void
_initialize_linux_nat (void)
{
  /* "set debug lin-lwp N" / "show debug lin-lwp" controls this
     module's printf-style debug output through debug_linux_nat.  */
  add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
			     &debug_linux_nat, _("\
Set debugging of GNU/Linux lwp module."), _("\
Show debugging of GNU/Linux lwp module."), _("\
Enables printf debugging output."),
			     NULL,
			     show_debug_linux_nat,
			     &setdebuglist, &showdebuglist);

  /* Save this mask as the default.  (With a NULL "set" argument,
     sigprocmask only reads the current mask into normal_mask.)  */
  sigprocmask (SIG_SETMASK, NULL, &normal_mask);

  /* Install a SIGCHLD handler.  SA_RESTART keeps interrupted
     syscalls from failing with EINTR when the handler fires.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;

  /* Make it the default.  */
  sigaction (SIGCHLD, &sigchld_action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  /* Start with an empty blocked-signals set; signals are added to it
     later (e.g. by lin_thread_get_thread_signals).  */
  sigemptyset (&blocked_mask);

  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to
     support read-only process state.  */
  linux_ptrace_set_additional_flags (PTRACE_O_TRACESYSGOOD
				     | PTRACE_O_TRACEVFORKDONE
				     | PTRACE_O_TRACEVFORK
				     | PTRACE_O_TRACEFORK
				     | PTRACE_O_TRACEEXEC);
}
4809\f
4810
4811/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4812 the GNU/Linux Threads library and therefore doesn't really belong
4813 here. */
4814
4815/* Read variable NAME in the target and return its value if found.
4816 Otherwise return zero. It is assumed that the type of the variable
4817 is `int'. */
4818
4819static int
4820get_signo (const char *name)
4821{
3b7344d5 4822 struct bound_minimal_symbol ms;
d6b0e80f
AC
4823 int signo;
4824
4825 ms = lookup_minimal_symbol (name, NULL, NULL);
3b7344d5 4826 if (ms.minsym == NULL)
d6b0e80f
AC
4827 return 0;
4828
77e371c0 4829 if (target_read_memory (BMSYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
4830 sizeof (signo)) != 0)
4831 return 0;
4832
4833 return signo;
4834}
4835
/* Return the set of signals used by the threads library in *SET.
   Also installs a handler for the thread "cancel" signal and adds it
   to this module's blocked_mask.  */

void
lin_thread_get_thread_signals (sigset_t *set)
{
  struct sigaction action;
  int restart, cancel;

  /* Reset both the module-wide blocked set and the caller's set
     before filling them in below.  */
  sigemptyset (&blocked_mask);
  sigemptyset (set);

  /* Ask the inferior's threads library which signal numbers it uses;
     get_signo returns 0 when the variable is absent or unreadable.  */
  restart = get_signo ("__pthread_sig_restart");
  cancel = get_signo ("__pthread_sig_cancel");

  /* LinuxThreads normally uses the first two RT signals, but in some legacy
     cases may use SIGUSR1/SIGUSR2.  NPTL always uses RT signals, but does
     not provide any way for the debugger to query the signal numbers -
     fortunately they don't change!  */

  if (restart == 0)
    restart = __SIGRTMIN;

  if (cancel == 0)
    cancel = __SIGRTMIN + 1;

  sigaddset (set, restart);
  sigaddset (set, cancel);

  /* The GNU/Linux Threads library makes terminating threads send a
     special "cancel" signal instead of SIGCHLD.  Make sure we catch
     those (to prevent them from terminating GDB itself, which is
     likely to be their default action) and treat them the same way as
     SIGCHLD.  */

  action.sa_handler = sigchld_handler;
  sigemptyset (&action.sa_mask);
  action.sa_flags = SA_RESTART;
  sigaction (cancel, &action, NULL);

  /* We block the "cancel" signal throughout this code ...  */
  sigaddset (&blocked_mask, cancel);
  sigprocmask (SIG_BLOCK, &blocked_mask, NULL);

  /* ... except during a sigsuspend.  */
  sigdelset (&suspend_mask, cancel);
}