]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/linux-nat.c
linux-nat.c: always mark execing LWP as resumed
[thirdparty/binutils-gdb.git] / gdb / linux-nat.c
CommitLineData
3993f6b1 1/* GNU/Linux native-dependent code common to multiple platforms.
dba24537 2
32d0add0 3 Copyright (C) 2001-2015 Free Software Foundation, Inc.
3993f6b1
DJ
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
a9762ec7 9 the Free Software Foundation; either version 3 of the License, or
3993f6b1
DJ
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
a9762ec7 18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
3993f6b1
DJ
19
20#include "defs.h"
21#include "inferior.h"
45741a9c 22#include "infrun.h"
3993f6b1 23#include "target.h"
96d7229d
LM
24#include "nat/linux-nat.h"
25#include "nat/linux-waitpid.h"
3993f6b1 26#include "gdb_wait.h"
d6b0e80f
AC
27#ifdef HAVE_TKILL_SYSCALL
28#include <unistd.h>
29#include <sys/syscall.h>
30#endif
3993f6b1 31#include <sys/ptrace.h>
0274a8ce 32#include "linux-nat.h"
125f8a3d
GB
33#include "nat/linux-ptrace.h"
34#include "nat/linux-procfs.h"
ac264b3b 35#include "linux-fork.h"
d6b0e80f
AC
36#include "gdbthread.h"
37#include "gdbcmd.h"
38#include "regcache.h"
4f844a66 39#include "regset.h"
dab06dbe 40#include "inf-child.h"
10d6c8cd
DJ
41#include "inf-ptrace.h"
42#include "auxv.h"
1777feb0 43#include <sys/procfs.h> /* for elf_gregset etc. */
dba24537
AC
44#include "elf-bfd.h" /* for elfcore_write_* */
45#include "gregset.h" /* for gregset */
46#include "gdbcore.h" /* for get_exec_file */
47#include <ctype.h> /* for isdigit */
53ce3c39 48#include <sys/stat.h> /* for struct stat */
dba24537 49#include <fcntl.h> /* for O_RDONLY */
b84876c2
PA
50#include "inf-loop.h"
51#include "event-loop.h"
52#include "event-top.h"
07e059b5
VP
53#include <pwd.h>
54#include <sys/types.h>
2978b111 55#include <dirent.h>
07e059b5 56#include "xml-support.h"
efcbbd14 57#include <sys/vfs.h>
6c95b8df 58#include "solib.h"
125f8a3d 59#include "nat/linux-osdata.h"
6432734d 60#include "linux-tdep.h"
7dcd53a0 61#include "symfile.h"
5808517f
YQ
62#include "agent.h"
63#include "tracepoint.h"
87b0bb13 64#include "buffer.h"
6ecd4729 65#include "target-descriptions.h"
614c279d 66#include "filestuff.h"
77e371c0 67#include "objfiles.h"
efcbbd14
UW
68
69#ifndef SPUFS_MAGIC
70#define SPUFS_MAGIC 0x23c9b64e
71#endif
dba24537 72
10568435
JK
73#ifdef HAVE_PERSONALITY
74# include <sys/personality.h>
75# if !HAVE_DECL_ADDR_NO_RANDOMIZE
76# define ADDR_NO_RANDOMIZE 0x0040000
77# endif
78#endif /* HAVE_PERSONALITY */
79
1777feb0 80/* This comment documents high-level logic of this file.
8a77dff3
VP
81
82Waiting for events in sync mode
83===============================
84
85When waiting for an event in a specific thread, we just use waitpid, passing
86the specific pid, and not passing WNOHANG.
87
1777feb0 88When waiting for an event in all threads, waitpid is not quite good. Prior to
8a77dff3 89version 2.4, Linux can either wait for event in main thread, or in secondary
1777feb0 90threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might
8a77dff3
VP
91miss an event. The solution is to use non-blocking waitpid, together with
92sigsuspend. First, we use non-blocking waitpid to get an event in the main
1777feb0 93process, if any. Second, we use non-blocking waitpid with the __WCLONE
8a77dff3
VP
94flag to check for events in cloned processes. If nothing is found, we use
95sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something
96happened to a child process -- and SIGCHLD will be delivered both for events
97in main debugged process and in cloned processes. As soon as we know there's
3e43a32a
MS
98an event, we get back to calling nonblocking waitpid with and without
99__WCLONE.
8a77dff3
VP
100
101Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
1777feb0 102so that we don't miss a signal. If SIGCHLD arrives in between, when it's
8a77dff3
VP
103blocked, the signal becomes pending and sigsuspend immediately
104notices it and returns.
105
106Waiting for events in async mode
107================================
108
7feb7d06
PA
109In async mode, GDB should always be ready to handle both user input
110and target events, so neither blocking waitpid nor sigsuspend are
111viable options. Instead, we should asynchronously notify the GDB main
112event loop whenever there's an unprocessed event from the target. We
113detect asynchronous target events by handling SIGCHLD signals. To
114notify the event loop about target events, the self-pipe trick is used
115--- a pipe is registered as waitable event source in the event loop,
116the event loop select/poll's on the read end of this pipe (as well on
117other event sources, e.g., stdin), and the SIGCHLD handler writes a
118byte to this pipe. This is more portable than relying on
119pselect/ppoll, since on kernels that lack those syscalls, libc
120emulates them with select/poll+sigprocmask, and that is racy
121(a.k.a. plain broken).
122
123Obviously, if we fail to notify the event loop if there's a target
124event, it's bad. OTOH, if we notify the event loop when there's no
125event from the target, linux_nat_wait will detect that there's no real
126event to report, and return event of type TARGET_WAITKIND_IGNORE.
127This is mostly harmless, but it will waste time and is better avoided.
128
129The main design point is that every time GDB is outside linux-nat.c,
130we have a SIGCHLD handler installed that is called when something
131happens to the target and notifies the GDB event loop. Whenever GDB
132core decides to handle the event, and calls into linux-nat.c, we
133process things as in sync mode, except that we never block in
134sigsuspend.
135
136While processing an event, we may end up momentarily blocked in
137waitpid calls. Those waitpid calls, while blocking, are guaranteed to
138return quickly. E.g., in all-stop mode, before reporting to the core
139that an LWP hit a breakpoint, all LWPs are stopped by sending them
140SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
141Note that this is different from blocking indefinitely waiting for the
142next event --- here, we're already handling an event.
8a77dff3
VP
143
144Use of signals
145==============
146
147We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
148signal is not entirely significant; we just need for a signal to be delivered,
149so that we can intercept it. SIGSTOP's advantage is that it can not be
150blocked. A disadvantage is that it is not a real-time signal, so it can only
151be queued once; we do not keep track of other sources of SIGSTOP.
152
153Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
154use them, because they have special behavior when the signal is generated -
155not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
156kills the entire thread group.
157
158A delivered SIGSTOP would stop the entire thread group, not just the thread we
159tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
160cancel it (by PTRACE_CONT without passing SIGSTOP).
161
162We could use a real-time signal instead. This would solve those problems; we
163could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
164But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
165generates it, and there are races with trying to find a signal that is not
166blocked. */
a0ef4274 167
dba24537
AC
168#ifndef O_LARGEFILE
169#define O_LARGEFILE 0
170#endif
0274a8ce 171
10d6c8cd
DJ
172/* The single-threaded native GNU/Linux target_ops. We save a pointer for
173 the use of the multi-threaded target. */
174static struct target_ops *linux_ops;
f973ed9c 175static struct target_ops linux_ops_saved;
10d6c8cd 176
9f0bdab8 177/* The method to call, if any, when a new thread is attached. */
7b50312a
PA
178static void (*linux_nat_new_thread) (struct lwp_info *);
179
26cb8b7c
PA
180/* The method to call, if any, when a new fork is attached. */
181static linux_nat_new_fork_ftype *linux_nat_new_fork;
182
183/* The method to call, if any, when a process is no longer
184 attached. */
185static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;
186
7b50312a
PA
187/* Hook to call prior to resuming a thread. */
188static void (*linux_nat_prepare_to_resume) (struct lwp_info *);
9f0bdab8 189
5b009018
PA
190/* The method to call, if any, when the siginfo object needs to be
191 converted between the layout returned by ptrace, and the layout in
192 the architecture of the inferior. */
a5362b9a 193static int (*linux_nat_siginfo_fixup) (siginfo_t *,
5b009018
PA
194 gdb_byte *,
195 int);
196
ac264b3b
MS
197/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
198 Called by our to_xfer_partial. */
4ac248ca 199static target_xfer_partial_ftype *super_xfer_partial;
10d6c8cd 200
6a3cb8e8
PA
201/* The saved to_close method, inherited from inf-ptrace.c.
202 Called by our to_close. */
203static void (*super_close) (struct target_ops *);
204
/* Verbosity level of the "lin-lwp" debug logging throughout this
   file; set by "set debug lin-lwp".  Nonzero enables the
   fprintf_unfiltered traces.  */
static unsigned int debug_linux_nat;

/* Implement "show debug lin-lwp".  */

static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}
d6b0e80f 213
ae087d01
DJ
/* A singly-linked list node recording a (pid, waitpid status) pair
   for a child whose initial stop was seen before GDB was ready to
   process it.  */
struct simple_pid_list
{
  int pid;	/* Process/LWP id.  */
  int status;	/* Cached waitpid status for PID.  */
  struct simple_pid_list *next;
};
/* Head of the list of early-stopped pids not yet consumed.  */
struct simple_pid_list *stopped_pids;

221
3dd5b83d
PA
222/* Async mode support. */
223
b84876c2
PA
224/* The read/write ends of the pipe registered as waitable file in the
225 event loop. */
226static int linux_nat_event_pipe[2] = { -1, -1 };
227
7feb7d06 228/* Flush the event pipe. */
b84876c2 229
7feb7d06
PA
230static void
231async_file_flush (void)
b84876c2 232{
7feb7d06
PA
233 int ret;
234 char buf;
b84876c2 235
7feb7d06 236 do
b84876c2 237 {
7feb7d06 238 ret = read (linux_nat_event_pipe[0], &buf, 1);
b84876c2 239 }
7feb7d06 240 while (ret >= 0 || (ret == -1 && errno == EINTR));
b84876c2
PA
241}
242
7feb7d06
PA
243/* Put something (anything, doesn't matter what, or how much) in event
244 pipe, so that the select/poll in the event-loop realizes we have
245 something to process. */
252fbfc8 246
b84876c2 247static void
7feb7d06 248async_file_mark (void)
b84876c2 249{
7feb7d06 250 int ret;
b84876c2 251
7feb7d06
PA
252 /* It doesn't really matter what the pipe contains, as long we end
253 up with something in it. Might as well flush the previous
254 left-overs. */
255 async_file_flush ();
b84876c2 256
7feb7d06 257 do
b84876c2 258 {
7feb7d06 259 ret = write (linux_nat_event_pipe[1], "+", 1);
b84876c2 260 }
7feb7d06 261 while (ret == -1 && errno == EINTR);
b84876c2 262
7feb7d06
PA
263 /* Ignore EAGAIN. If the pipe is full, the event loop will already
264 be awakened anyway. */
b84876c2
PA
265}
266
7feb7d06
PA
267static int kill_lwp (int lwpid, int signo);
268
269static int stop_callback (struct lwp_info *lp, void *data);
270
271static void block_child_signals (sigset_t *prev_mask);
272static void restore_child_signals_mask (sigset_t *prev_mask);
2277426b
PA
273
274struct lwp_info;
275static struct lwp_info *add_lwp (ptid_t ptid);
276static void purge_lwp_list (int pid);
4403d8e9 277static void delete_lwp (ptid_t ptid);
2277426b
PA
278static struct lwp_info *find_lwp_pid (ptid_t ptid);
279
8a99810d
PA
280static int lwp_status_pending_p (struct lwp_info *lp);
281
ae087d01
DJ
282\f
283/* Trivial list manipulation functions to keep track of a list of
284 new stopped processes. */
285static void
3d799a95 286add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
ae087d01
DJ
287{
288 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
e0881a8e 289
ae087d01 290 new_pid->pid = pid;
3d799a95 291 new_pid->status = status;
ae087d01
DJ
292 new_pid->next = *listp;
293 *listp = new_pid;
294}
295
84636d28
PA
296static int
297in_pid_list_p (struct simple_pid_list *list, int pid)
298{
299 struct simple_pid_list *p;
300
301 for (p = list; p != NULL; p = p->next)
302 if (p->pid == pid)
303 return 1;
304 return 0;
305}
306
ae087d01 307static int
46a96992 308pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
ae087d01
DJ
309{
310 struct simple_pid_list **p;
311
312 for (p = listp; *p != NULL; p = &(*p)->next)
313 if ((*p)->pid == pid)
314 {
315 struct simple_pid_list *next = (*p)->next;
e0881a8e 316
46a96992 317 *statusp = (*p)->status;
ae087d01
DJ
318 xfree (*p);
319 *p = next;
320 return 1;
321 }
322 return 0;
323}
324
96d7229d 325/* Initialize ptrace warnings and check for supported ptrace
beed38b8
JB
326 features given PID.
327
328 ATTACHED should be nonzero iff we attached to the inferior. */
3993f6b1
DJ
329
/* Enable ptrace event reporting for process PID and emit any
   ptrace-feature warnings.  ATTACHED should be nonzero iff we
   attached to PID rather than spawning it ourselves.  */
static void
linux_init_ptrace (pid_t pid, int attached)
{
  linux_enable_event_reporting (pid, attached);
  linux_ptrace_init_warnings ();
}
336
/* Target hook called after a successful attach; initialize ptrace
   with ATTACHED = 1.  */
static void
linux_child_post_attach (struct target_ops *self, int pid)
{
  linux_init_ptrace (pid, 1);
}
342
/* Target hook called after spawning a new inferior; initialize
   ptrace with ATTACHED = 0.  */
static void
linux_child_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  linux_init_ptrace (ptid_get_pid (ptid), 0);
}
348
4403d8e9
JK
349/* Return the number of known LWPs in the tgid given by PID. */
350
351static int
352num_lwps (int pid)
353{
354 int count = 0;
355 struct lwp_info *lp;
356
357 for (lp = lwp_list; lp; lp = lp->next)
358 if (ptid_get_pid (lp->ptid) == pid)
359 count++;
360
361 return count;
362}
363
/* Call delete_lwp with prototype compatible for make_cleanup.
   LP_VOIDP is really a struct lwp_info *.  */

static void
delete_lwp_cleanup (void *lp_voidp)
{
  struct lwp_info *lp = lp_voidp;

  delete_lwp (lp->ptid);
}
373
d83ad864
DB
/* Target hook for follow_fork.  On entry inferior_ptid must be the
   ptid of the followed inferior.  At return, inferior_ptid will be
   unchanged.

   FOLLOW_CHILD selects whether GDB stays with the parent (0) or
   switches to the new child (1); DETACH_FORK says whether the
   un-followed fork child should be detached.  Returns 0.  */

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child,
			 int detach_fork)
{
  if (!follow_child)
    {
      struct lwp_info *child_lp = NULL;
      /* Default to "stopped with signal 0" so the WIFSTOPPED check
	 below passes even when the single-step workaround is
	 skipped.  */
      int status = W_STOPCODE (0);
      struct cleanup *old_chain;
      int has_vforked;
      int parent_pid, child_pid;

      has_vforked = (inferior_thread ()->pending_follow.kind
		     == TARGET_WAITKIND_VFORKED);
      parent_pid = ptid_get_lwp (inferior_ptid);
      if (parent_pid == 0)
	parent_pid = ptid_get_pid (inferior_ptid);
      child_pid
	= ptid_get_pid (inferior_thread ()->pending_follow.value.related_pid);


      /* We're already attached to the parent, by default.  */
      old_chain = save_inferior_ptid ();
      inferior_ptid = ptid_build (child_pid, child_pid, 0);
      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  make_cleanup (delete_lwp_cleanup, child_lp);

	  if (linux_nat_prepare_to_resume != NULL)
	    linux_nat_prepare_to_resume (child_lp);

	  /* When debugging an inferior in an architecture that supports
	     hardware single stepping on a kernel without commit
	     6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
	     process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
	     set if the parent process had them set.
	     To work around this, single step the child process
	     once before detaching to clear the flags.  */

	  if (!gdbarch_software_single_step_p (target_thread_architecture
						   (child_lp->ptid)))
	    {
	      linux_disable_event_reporting (child_pid);
	      if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
		perror_with_name (_("Couldn't do single step"));
	      if (my_waitpid (child_pid, &status, 0) < 0)
		perror_with_name (_("Couldn't wait vfork process"));
	    }

	  if (WIFSTOPPED (status))
	    {
	      int signo;

	      signo = WSTOPSIG (status);
	      /* Don't re-deliver a stop signal the user asked GDB not
		 to pass to the program; detach with signal 0.  */
	      if (signo != 0
		  && !signal_pass_state (gdb_signal_from_host (signo)))
		signo = 0;
	      ptrace (PTRACE_DETACH, child_pid, 0, signo);
	    }

	  /* Resets value of inferior_ptid to parent ptid.  */
	  do_cleanups (old_chain);
	}
      else
	{
	  /* Let the thread_db layer learn about this new process.  */
	  check_for_thread_db ();
	}

      /* Second run is a no-op when the detach_fork branch already
	 consumed the cleanups above.  */
      do_cleanups (old_chain);

      if (has_vforked)
	{
	  struct lwp_info *parent_lp;

	  parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
	  gdb_assert (linux_supports_tracefork () >= 0);

	  if (linux_supports_tracevforkdone ())
	    {
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LCFF: waiting for VFORK_DONE on %d\n",
				    parent_pid);
	      parent_lp->stopped = 1;

	      /* We'll handle the VFORK_DONE event like any other
		 event, in target_wait.  */
	    }
	  else
	    {
	      /* We can't insert breakpoints until the child has
		 finished with the shared memory region.  We need to
		 wait until that happens.  Ideal would be to just
		 call:
		 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
		 - waitpid (parent_pid, &status, __WALL);
		 However, most architectures can't handle a syscall
		 being traced on the way out if it wasn't traced on
		 the way in.

		 We might also think to loop, continuing the child
		 until it exits or gets a SIGTRAP.  One problem is
		 that the child might call ptrace with PTRACE_TRACEME.

		 There's no simple and reliable way to figure out when
		 the vforked child will be done with its copy of the
		 shared memory.  We could step it out of the syscall,
		 two instructions, let it go, and then single-step the
		 parent once.  When we have hardware single-step, this
		 would work; with software single-step it could still
		 be made to work but we'd have to be able to insert
		 single-step breakpoints in the child, and we'd have
		 to insert -just- the single-step breakpoint in the
		 parent.  Very awkward.

		 In the end, the best we can do is to make sure it
		 runs for a little while.  Hopefully it will be out of
		 range of any breakpoints we reinsert.  Usually this
		 is only the single-step breakpoint at vfork's return
		 point.  */

	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LCFF: no VFORK_DONE "
				    "support, sleeping a bit\n");

	      usleep (10000);

	      /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
		 and leave it pending.  The next linux_nat_resume call
		 will notice a pending event, and bypasses actually
		 resuming the inferior.  */
	      parent_lp->status = 0;
	      parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
	      parent_lp->stopped = 1;

	      /* If we're in async mode, need to tell the event loop
		 there's something here to process.  */
	      if (target_can_async_p ())
		async_file_mark ();
	    }
	}
    }
  else
    {
      struct lwp_info *child_lp;

      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Let the thread_db layer learn about this new process.  */
      check_for_thread_db ();
    }

  return 0;
}
541
4de4c07c 542\f
/* Target hook: a fork catchpoint works iff the kernel supports
   PTRACE_O_TRACEFORK.  Return 0 (success) in that case, nonzero
   otherwise.  */
static int
linux_child_insert_fork_catchpoint (struct target_ops *self, int pid)
{
  return !linux_supports_tracefork ();
}
548
/* Target hook: removing a fork catchpoint needs no target-side work;
   always succeed.  */
static int
linux_child_remove_fork_catchpoint (struct target_ops *self, int pid)
{
  return 0;
}
554
/* Target hook: a vfork catchpoint works iff the kernel supports
   ptrace fork-event reporting.  Return 0 (success) in that case,
   nonzero otherwise.  */
static int
linux_child_insert_vfork_catchpoint (struct target_ops *self, int pid)
{
  return !linux_supports_tracefork ();
}
560
/* Target hook: removing a vfork catchpoint needs no target-side
   work; always succeed.  */
static int
linux_child_remove_vfork_catchpoint (struct target_ops *self, int pid)
{
  return 0;
}
566
/* Target hook: an exec catchpoint works iff the kernel supports
   ptrace fork/exec event reporting.  Return 0 (success) in that
   case, nonzero otherwise.  */
static int
linux_child_insert_exec_catchpoint (struct target_ops *self, int pid)
{
  return !linux_supports_tracefork ();
}
572
/* Target hook: removing an exec catchpoint needs no target-side
   work; always succeed.  */
static int
linux_child_remove_exec_catchpoint (struct target_ops *self, int pid)
{
  return 0;
}
578
/* Target hook for "catch syscall".  Return 1 if the running kernel
   lacks PTRACE_O_TRACESYSGOOD support, otherwise enable syscall
   event reporting and return 0 (success).  */
static int
linux_child_set_syscall_catchpoint (struct target_ops *self,
				    int pid, int needed, int any_count,
				    int table_size, int *table)
{
  if (!linux_supports_tracesysgood ())
    return 1;

  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `table' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}
594
d6b0e80f
AC
595/* On GNU/Linux there are no real LWP's. The closest thing to LWP's
596 are processes sharing the same VM space. A multi-threaded process
597 is basically a group of such processes. However, such a grouping
598 is almost entirely a user-space issue; the kernel doesn't enforce
599 such a grouping at all (this might change in the future). In
600 general, we'll rely on the threads library (i.e. the GNU/Linux
601 Threads library) to provide such a grouping.
602
603 It is perfectly well possible to write a multi-threaded application
604 without the assistance of a threads library, by using the clone
605 system call directly. This module should be able to give some
606 rudimentary support for debugging such applications if developers
607 specify the CLONE_PTRACE flag in the clone system call, and are
608 using the Linux kernel 2.4 or above.
609
610 Note that there are some peculiarities in GNU/Linux that affect
611 this code:
612
613 - In general one should specify the __WCLONE flag to waitpid in
614 order to make it report events for any of the cloned processes
615 (and leave it out for the initial process). However, if a cloned
616 process has exited the exit status is only reported if the
617 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
618 we cannot use it since GDB must work on older systems too.
619
620 - When a traced, cloned process exits and is waited for by the
621 debugger, the kernel reassigns it to the original parent and
622 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
623 library doesn't notice this, which leads to the "zombie problem":
624 When debugged, a multi-threaded process that spawns a lot of
625 threads will run out of processes, even if the threads exit,
626 because the "zombies" stay around. */
627
628/* List of known LWPs. */
9f0bdab8 629struct lwp_info *lwp_list;
d6b0e80f
AC
630\f
631
d6b0e80f
AC
632/* Original signal mask. */
633static sigset_t normal_mask;
634
635/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
636 _initialize_linux_nat. */
637static sigset_t suspend_mask;
638
7feb7d06
PA
639/* Signals to block to make that sigsuspend work. */
640static sigset_t blocked_mask;
641
642/* SIGCHLD action. */
643struct sigaction sigchld_action;
b84876c2 644
7feb7d06
PA
/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  Pair with
   restore_child_signals_mask.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
}
657
/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}
2455069d
UW
666
667/* Mask of signals to pass directly to the inferior. */
668static sigset_t pass_mask;
669
670/* Update signals to pass to the inferior. */
671static void
94bedb42
TT
672linux_nat_pass_signals (struct target_ops *self,
673 int numsigs, unsigned char *pass_signals)
2455069d
UW
674{
675 int signo;
676
677 sigemptyset (&pass_mask);
678
679 for (signo = 1; signo < NSIG; signo++)
680 {
2ea28649 681 int target_signo = gdb_signal_from_host (signo);
2455069d
UW
682 if (target_signo < numsigs && pass_signals[target_signo])
683 sigaddset (&pass_mask, signo);
684 }
685}
686
d6b0e80f
AC
687\f
688
689/* Prototypes for local functions. */
690static int stop_wait_callback (struct lwp_info *lp, void *data);
28439f5e 691static int linux_thread_alive (ptid_t ptid);
8dd27370 692static char *linux_child_pid_to_exec_file (struct target_ops *self, int pid);
710151dd 693
d6b0e80f 694\f
d6b0e80f 695
7b50312a
PA
/* Destroy and free LP, including its architecture-specific data.  */

static void
lwp_free (struct lwp_info *lp)
{
  xfree (lp->arch_private);
  xfree (lp);
}
704
d90e17a7
PA
705/* Remove all LWPs belong to PID from the lwp list. */
706
707static void
708purge_lwp_list (int pid)
709{
710 struct lwp_info *lp, *lpprev, *lpnext;
711
712 lpprev = NULL;
713
714 for (lp = lwp_list; lp; lp = lpnext)
715 {
716 lpnext = lp->next;
717
718 if (ptid_get_pid (lp->ptid) == pid)
719 {
720 if (lp == lwp_list)
721 lwp_list = lp->next;
722 else
723 lpprev->next = lp->next;
724
7b50312a 725 lwp_free (lp);
d90e17a7
PA
726 }
727 else
728 lpprev = lp;
729 }
730}
731
26cb8b7c
PA
732/* Add the LWP specified by PTID to the list. PTID is the first LWP
733 in the process. Return a pointer to the structure describing the
734 new LWP.
735
736 This differs from add_lwp in that we don't let the arch specific
737 bits know about this new thread. Current clients of this callback
738 take the opportunity to install watchpoints in the new thread, and
739 we shouldn't do that for the first thread. If we're spawning a
740 child ("run"), the thread executes the shell wrapper first, and we
741 shouldn't touch it until it execs the program we want to debug.
742 For "attach", it'd be okay to call the callback, but it's not
743 necessary, because watchpoints can't yet have been inserted into
744 the inferior. */
d6b0e80f
AC
745
746static struct lwp_info *
26cb8b7c 747add_initial_lwp (ptid_t ptid)
d6b0e80f
AC
748{
749 struct lwp_info *lp;
750
dfd4cc63 751 gdb_assert (ptid_lwp_p (ptid));
d6b0e80f
AC
752
753 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
754
755 memset (lp, 0, sizeof (struct lwp_info));
756
25289eb2 757 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
758 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
759
760 lp->ptid = ptid;
dc146f7c 761 lp->core = -1;
d6b0e80f
AC
762
763 lp->next = lwp_list;
764 lwp_list = lp;
d6b0e80f 765
26cb8b7c
PA
766 return lp;
767}
768
769/* Add the LWP specified by PID to the list. Return a pointer to the
770 structure describing the new LWP. The LWP should already be
771 stopped. */
772
773static struct lwp_info *
774add_lwp (ptid_t ptid)
775{
776 struct lwp_info *lp;
777
778 lp = add_initial_lwp (ptid);
779
6e012a6c
PA
780 /* Let the arch specific bits know about this new thread. Current
781 clients of this callback take the opportunity to install
26cb8b7c
PA
782 watchpoints in the new thread. We don't do this for the first
783 thread though. See add_initial_lwp. */
784 if (linux_nat_new_thread != NULL)
7b50312a 785 linux_nat_new_thread (lp);
9f0bdab8 786
d6b0e80f
AC
787 return lp;
788}
789
790/* Remove the LWP specified by PID from the list. */
791
792static void
793delete_lwp (ptid_t ptid)
794{
795 struct lwp_info *lp, *lpprev;
796
797 lpprev = NULL;
798
799 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
800 if (ptid_equal (lp->ptid, ptid))
801 break;
802
803 if (!lp)
804 return;
805
d6b0e80f
AC
806 if (lpprev)
807 lpprev->next = lp->next;
808 else
809 lwp_list = lp->next;
810
7b50312a 811 lwp_free (lp);
d6b0e80f
AC
812}
813
814/* Return a pointer to the structure describing the LWP corresponding
815 to PID. If no corresponding LWP could be found, return NULL. */
816
817static struct lwp_info *
818find_lwp_pid (ptid_t ptid)
819{
820 struct lwp_info *lp;
821 int lwp;
822
dfd4cc63
LM
823 if (ptid_lwp_p (ptid))
824 lwp = ptid_get_lwp (ptid);
d6b0e80f 825 else
dfd4cc63 826 lwp = ptid_get_pid (ptid);
d6b0e80f
AC
827
828 for (lp = lwp_list; lp; lp = lp->next)
dfd4cc63 829 if (lwp == ptid_get_lwp (lp->ptid))
d6b0e80f
AC
830 return lp;
831
832 return NULL;
833}
834
835/* Call CALLBACK with its second argument set to DATA for every LWP in
836 the list. If CALLBACK returns 1 for a particular LWP, return a
837 pointer to the structure describing that LWP immediately.
838 Otherwise return NULL. */
839
840struct lwp_info *
d90e17a7
PA
841iterate_over_lwps (ptid_t filter,
842 int (*callback) (struct lwp_info *, void *),
843 void *data)
d6b0e80f
AC
844{
845 struct lwp_info *lp, *lpnext;
846
847 for (lp = lwp_list; lp; lp = lpnext)
848 {
849 lpnext = lp->next;
d90e17a7
PA
850
851 if (ptid_match (lp->ptid, filter))
852 {
853 if ((*callback) (lp, data))
854 return lp;
855 }
d6b0e80f
AC
856 }
857
858 return NULL;
859}
860
2277426b
PA
861/* Update our internal state when changing from one checkpoint to
862 another indicated by NEW_PTID. We can only switch single-threaded
863 applications, so we only create one new LWP, and the previous list
864 is discarded. */
f973ed9c
DJ
865
866void
867linux_nat_switch_fork (ptid_t new_ptid)
868{
869 struct lwp_info *lp;
870
dfd4cc63 871 purge_lwp_list (ptid_get_pid (inferior_ptid));
2277426b 872
f973ed9c
DJ
873 lp = add_lwp (new_ptid);
874 lp->stopped = 1;
e26af52f 875
2277426b
PA
876 /* This changes the thread's ptid while preserving the gdb thread
877 num. Also changes the inferior pid, while preserving the
878 inferior num. */
879 thread_change_ptid (inferior_ptid, new_ptid);
880
881 /* We've just told GDB core that the thread changed target id, but,
882 in fact, it really is a different thread, with different register
883 contents. */
884 registers_changed ();
e26af52f
DJ
885}
886
e26af52f
DJ
/* Handle the exit of a single thread LP: announce it when thread
   event printing is enabled, then remove it from GDB's thread table
   and from our LWP list.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_ptid (lp->ptid);

  /* TH may be NULL if GDB core never learned about this LWP.  */
  if (th)
    {
      if (print_thread_events)
	printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      delete_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}
904
a0ef4274
DJ
/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.

   FIRST is nonzero when this is the first (main) LWP of its thread
   group.  On return, *CLONED is set if the LWP had to be waited for
   with __WCLONE, and *SIGNALLED is set if it stopped with a signal
   other than the expected SIGSTOP.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
			    int *signalled)
{
  pid_t new_pid, pid = ptid_get_lwp (ptid);
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
	 is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
	 (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
	warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
			    pid, status_to_str (status));
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      /* Stopped, but by some signal other than our SIGSTOP; note it
	 so the caller can re-deliver the signal later.  */
      *signalled = 1;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Received %s after attaching\n",
			    status_to_str (status));
    }

  return status;
}
977
84636d28
PA
/* Attach to the LWP specified by PID.  Return 0 if successful, -1 if
   the new LWP could not be attached, or 1 if we're already auto
   attached to this thread, but haven't processed the
   PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
   its existance, without considering it an error.  */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwpid;

  gdb_assert (ptid_lwp_p (ptid));

  lp = find_lwp_pid (ptid);
  lwpid = ptid_get_lwp (ptid);

  /* We assume that we're already attached to any LWP that has an id
     equal to the overall process id, and to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen.  */
  if (lwpid != ptid_get_pid (ptid) && lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  if (linux_supports_tracefork ())
	    {
	      /* If we haven't stopped all threads when we get here,
		 we may have seen a thread listed in thread_db's list,
		 but not processed the PTRACE_EVENT_CLONE yet.  If
		 that's the case, ignore this new thread, and let
		 normal event handling discover it later.  */
	      if (in_pid_list_p (stopped_pids, lwpid))
		{
		  /* We've already seen this thread stop, but we
		     haven't seen the PTRACE_EVENT_CLONE extended
		     event yet.  */
		  return 0;
		}
	      else
		{
		  int new_pid;
		  int status;	/* Intentionally shadows the outer
				   STATUS; this one is only for the
				   WNOHANG probe below.  */

		  /* See if we've got a stop for this new child
		     pending.  If so, we're already attached.  */
		  gdb_assert (lwpid > 0);
		  new_pid = my_waitpid (lwpid, &status, WNOHANG);
		  if (new_pid == -1 && errno == ECHILD)
		    new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
		  if (new_pid != -1)
		    {
		      if (WIFSTOPPED (status))
			add_to_pid_list (&stopped_pids, lwpid, status);
		      return 1;
		    }
		}
	    }

	  /* If we fail to attach to the thread, issue a warning,
	     but continue.  One way this can happen is if thread
	     creation is interrupted; as of Linux kernel 2.6.19, a
	     bug may place threads in the thread list and then fail
	     to create them.  */
	  warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
		   safe_strerror (errno));
	  return -1;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
			    target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      if (!WIFSTOPPED (status))
	/* The LWP exited before we could stop it; treat it like the
	   already-auto-attached case.  */
	return 1;

      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->cloned = cloned;
      lp->signalled = signalled;
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  /* Stopped with a real signal; keep the status so it can be
	     reported when the LWP is resumed.  */
	  lp->resumed = 1;
	  lp->status = status;
	}

      target_post_attach (ptid_get_lwp (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "LLAL: waitpid %s received %s\n",
			      target_pid_to_str (ptid),
			      status_to_str (status));
	}
    }
  else
    {
      /* We assume that the LWP representing the original process is
	 already stopped.  Mark it as stopped in the data structure
	 that the GNU/linux ptrace layer uses to keep track of
	 threads.  Note that this won't have already been done since
	 the main thread will have, we assume, been stopped by an
	 attach from a different layer.  */
      if (lp == NULL)
	lp = add_lwp (ptid);
      lp->stopped = 1;
    }

  lp->last_resume_kind = resume_stop;
  return 0;
}
1096
/* target_ops create-inferior hook: spawn EXEC_FILE with ALLARGS and
   ENV, optionally disabling address-space randomization around the
   actual fork/exec.  */

static void
linux_nat_create_inferior (struct target_ops *ops,
			   char *exec_file, char *allargs, char **env,
			   int from_tty)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif /* HAVE_PERSONALITY */

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      /* Temporarily set ADDR_NO_RANDOMIZE on *this* process; the
	 personality is inherited across fork/exec by the child.  */
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      /* Re-read to verify the kernel actually honored the flag.  */
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning (_("Error disabling address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */

  /* Make sure we report all signals during startup.  */
  linux_nat_pass_signals (ops, 0, NULL);

  linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      /* Restore our own personality now that the child is running.  */
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning (_("Error restoring address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */
}
1142
8784d563
PA
/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  struct lwp_info *lp;

  /* Ignore LWPs we're already attached to.  */
  lp = find_lwp_pid (ptid);
  if (lp == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  int err = errno;

	  /* Be quiet if we simply raced with the thread exiting.
	     EPERM is returned if the thread's task still exists, and
	     is marked as exited or zombie, as well as other
	     conditions, so in that case, confirm the status in
	     /proc/PID/status.  */
	  if (err == ESRCH
	      || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	    {
	      if (debug_linux_nat)
		{
		  fprintf_unfiltered (gdb_stdlog,
				      "Cannot attach to lwp %d: "
				      "thread is gone (%d: %s)\n",
				      lwpid, err, safe_strerror (err));
		}
	    }
	  else
	    {
	      warning (_("Cannot attach to lwp %d: %s\n"),
		       lwpid,
		       linux_ptrace_attach_fail_reason_string (ptid,
							       err));
	    }
	}
      else
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"PTRACE_ATTACH %s, 0, 0 (OK)\n",
				target_pid_to_str (ptid));

	  lp = add_lwp (ptid);
	  lp->cloned = 1;

	  /* The next time we wait for this LWP we'll see a SIGSTOP as
	     PTRACE_ATTACH brings it to a halt.  */
	  lp->signalled = 1;

	  /* We need to wait for a stop before being able to make the
	     next ptrace call on this LWP.  */
	  lp->must_set_ptrace_flags = 1;
	}

      /* Report "new LWP found" even on attach failure, since the
	 thread was not previously in our list.  */
      return 1;
    }
  return 0;
}
1209
/* target_ops attach hook: attach to the process identified by ARGS,
   then collect its initial stop and attach to all of its LWPs.  */

static void
linux_nat_attach (struct target_ops *ops, const char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;
  volatile struct gdb_exception ex;

  /* Make sure we report all signals during attach.  */
  linux_nat_pass_signals (ops, 0, NULL);

  TRY_CATCH (ex, RETURN_MASK_ERROR)
    {
      linux_ops->to_attach (ops, args, from_tty);
    }
  if (ex.reason < 0)
    {
      /* Augment the original error message with a more detailed
	 reason for the ptrace attach failure, if one is known.  */
      pid_t pid = parse_pid_to_attach (args);
      struct buffer buffer;
      char *message, *buffer_s;

      message = xstrdup (ex.message);
      make_cleanup (xfree, message);

      buffer_init (&buffer);
      linux_ptrace_attach_fail_reason (pid, &buffer);

      buffer_grow_str0 (&buffer, "");
      buffer_s = buffer_finish (&buffer);
      make_cleanup (xfree, buffer_s);

      if (*buffer_s != '\0')
	throw_error (ex.error, "warning: %s\n%s", buffer_s, message);
      else
	throw_error (ex.error, "%s", message);
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = ptid_build (ptid_get_pid (inferior_ptid),
		     ptid_get_pid (inferior_ptid),
		     0);
  thread_change_ptid (inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_initial_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
				       &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      /* The process died between our attach and the first wait;
	 report why and mourn it.  */
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal_ours ();
	  target_mourn_inferior ();
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum gdb_signal signo;

	  target_terminal_ours ();
	  target_mourn_inferior ();

	  signo = gdb_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 gdb_signal_to_name (signo),
		 gdb_signal_to_string (signo));
	}

      internal_error (__FILE__, __LINE__,
		      _("unexpected status %d for PID %ld"),
		      status, (long) ptid_get_lwp (ptid));
    }

  lp->stopped = 1;

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LNA: waitpid %ld, saving status %s\n",
			(long) ptid_get_pid (lp->ptid), status_to_str (status));

  lp->status = status;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  The inferior may be using raw clone instead of
     using pthreads.  But even if it is using pthreads, thread_db
     walks structures in the inferior's address space to find the list
     of threads/LWPs, and those structures may well be corrupted.
     Note that once thread_db is loaded, we'll still use it to list
     threads and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (ptid_get_pid (lp->ptid),
				  attach_proc_task_lwp_callback);

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}
1316
a0ef4274
DJ
/* Get pending status of LP.  On return, *STATUS is either 0 (nothing
   to re-deliver) or a W_STOPCODE-encoded wait status carrying the
   signal that should be passed back to the LWP.  Always returns 0.  */
static int
get_pending_status (struct lwp_info *lp, int *status)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to GDB_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    signo = GDB_SIGNAL_0;	/* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = gdb_signal_from_host (WSTOPSIG (lp->status));
  else if (non_stop && !is_executing (lp->ptid))
    {
      struct thread_info *tp = find_thread_ptid (lp->ptid);

      signo = tp->suspend.stop_signal;
    }
  else if (!non_stop)
    {
      struct target_waitstatus last;
      ptid_t last_ptid;

      get_last_target_status (&last_ptid, &last);

      /* Only trust stop_signal if this LWP was the last one the core
	 saw an event for.  */
      if (ptid_get_lwp (lp->ptid) == ptid_get_lwp (last_ptid))
	{
	  struct thread_info *tp = find_thread_ptid (lp->ptid);

	  signo = tp->suspend.stop_signal;
	}
    }

  *status = 0;

  if (signo == GDB_SIGNAL_0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has no pending signal\n",
			    target_pid_to_str (lp->ptid));
    }
  else if (!signal_pass_state (signo))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s had signal %s, "
			    "but it is in no pass state\n",
			    target_pid_to_str (lp->ptid),
			    gdb_signal_to_string (signo));
    }
  else
    {
      *status = W_STOPCODE (gdb_signal_to_host (signo));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has pending signal %s\n",
			    target_pid_to_str (lp->ptid),
			    gdb_signal_to_string (signo));
    }

  return 0;
}
1400
d6b0e80f
AC
/* Callback for iterate_over_lwps: detach from LP.  Flushes any
   pending SIGSTOP, re-delivers any other pending signal via
   PTRACE_DETACH, and removes the LWP from our list.  The LWP whose id
   equals the process id (the main thread) is deliberately left for
   the caller to detach.  */

static int
detach_callback (struct lwp_info *lp, void *data)
{
  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  if (debug_linux_nat && lp->status)
    fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
			strsignal (WSTOPSIG (lp->status)),
			target_pid_to_str (lp->ptid));

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "DC: Sending SIGCONT to %s\n",
			    target_pid_to_str (lp->ptid));

      kill_lwp (ptid_get_lwp (lp->ptid), SIGCONT);
      lp->signalled = 0;
    }

  /* We don't actually detach from the LWP that has an id equal to the
     overall process id just yet.  */
  if (ptid_get_lwp (lp->ptid) != ptid_get_pid (lp->ptid))
    {
      int status = 0;

      /* Pass on any pending signal for this LWP.  */
      get_pending_status (lp, &status);

      if (linux_nat_prepare_to_resume != NULL)
	linux_nat_prepare_to_resume (lp);
      errno = 0;
      /* WSTOPSIG (0) is 0, so a clean status detaches with no signal.  */
      if (ptrace (PTRACE_DETACH, ptid_get_lwp (lp->ptid), 0,
		  WSTOPSIG (status)) < 0)
	error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
	       safe_strerror (errno));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "PTRACE_DETACH (%s, %s, 0) (OK)\n",
			    target_pid_to_str (lp->ptid),
			    strsignal (WSTOPSIG (status)));

      delete_lwp (lp->ptid);
    }

  return 0;
}
1451
/* target_ops detach hook: stop every LWP, detach from all of them,
   then detach from (or switch away from, in the multi-fork case) the
   main process.  */

static void
linux_nat_detach (struct target_ops *ops, const char *args, int from_tty)
{
  int pid;
  int status;
  struct lwp_info *main_lwp;

  pid = ptid_get_pid (inferior_ptid);

  /* Don't unregister from the event loop, as there may be other
     inferiors running. */

  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to sucessfully detach.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);

  iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);

  /* Only the initial process should be left right now.  */
  gdb_assert (num_lwps (ptid_get_pid (inferior_ptid)) == 1);

  main_lwp = find_lwp_pid (pid_to_ptid (pid));

  /* Pass on any pending signal for the last LWP.  */
  if ((args == NULL || *args == '\0')
      && get_pending_status (main_lwp, &status) != -1
      && WIFSTOPPED (status))
    {
      char *tem;

      /* Put the signal number in ARGS so that inf_ptrace_detach will
	 pass it along with PTRACE_DETACH.  NOTE: alloca keeps the
	 buffer alive for the rest of this function, which is all the
	 lifetime ARGS needs here.  */
      tem = alloca (8);
      xsnprintf (tem, 8, "%d", (int) WSTOPSIG (status));
      args = tem;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LND: Sending signal %s to %s\n",
			    args,
			    target_pid_to_str (main_lwp->ptid));
    }

  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (main_lwp);
  delete_lwp (main_lwp->ptid);

  if (forks_exist_p ())
    {
      /* Multi-fork case.  The current inferior_ptid is being detached
	 from, but there are other viable forks to debug.  Detach from
	 the current fork, and context-switch to the first
	 available.  */
      linux_fork_detach (args, from_tty);
    }
  else
    linux_ops->to_detach (ops, args, from_tty);
}
1512
8a99810d
PA
1513/* Resume execution of the inferior process. If STEP is nonzero,
1514 single-step it. If SIGNAL is nonzero, give it that signal. */
1515
1516static void
1517linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1518{
1519 ptid_t ptid;
1520
1521 lp->step = step;
1522 if (linux_nat_prepare_to_resume != NULL)
1523 linux_nat_prepare_to_resume (lp);
1524 /* Convert to something the lower layer understands. */
1525 ptid = pid_to_ptid (ptid_get_lwp (lp->ptid));
1526 linux_ops->to_resume (linux_ops, ptid, step, signo);
1527 lp->stopped_by_watchpoint = 0;
1528 lp->stopped = 0;
1529 registers_changed_ptid (lp->ptid);
1530}
1531
d6b0e80f
AC
1532/* Resume LP. */
1533
25289eb2 1534static void
e5ef252a 1535resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
d6b0e80f 1536{
25289eb2 1537 if (lp->stopped)
6c95b8df 1538 {
c9657e70 1539 struct inferior *inf = find_inferior_ptid (lp->ptid);
25289eb2
PA
1540
1541 if (inf->vfork_child != NULL)
1542 {
1543 if (debug_linux_nat)
1544 fprintf_unfiltered (gdb_stdlog,
1545 "RC: Not resuming %s (vfork parent)\n",
1546 target_pid_to_str (lp->ptid));
1547 }
8a99810d 1548 else if (!lwp_status_pending_p (lp))
25289eb2
PA
1549 {
1550 if (debug_linux_nat)
1551 fprintf_unfiltered (gdb_stdlog,
e5ef252a
PA
1552 "RC: Resuming sibling %s, %s, %s\n",
1553 target_pid_to_str (lp->ptid),
1554 (signo != GDB_SIGNAL_0
1555 ? strsignal (gdb_signal_to_host (signo))
1556 : "0"),
1557 step ? "step" : "resume");
25289eb2 1558
8a99810d 1559 linux_resume_one_lwp (lp, step, signo);
25289eb2
PA
1560 }
1561 else
1562 {
1563 if (debug_linux_nat)
1564 fprintf_unfiltered (gdb_stdlog,
1565 "RC: Not resuming sibling %s (has pending)\n",
1566 target_pid_to_str (lp->ptid));
1567 }
6c95b8df 1568 }
25289eb2 1569 else
d6b0e80f 1570 {
d90e17a7
PA
1571 if (debug_linux_nat)
1572 fprintf_unfiltered (gdb_stdlog,
25289eb2 1573 "RC: Not resuming sibling %s (not stopped)\n",
d6b0e80f 1574 target_pid_to_str (lp->ptid));
d6b0e80f 1575 }
25289eb2 1576}
d6b0e80f 1577
8817a6f2
PA
1578/* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1579 Resume LWP with the last stop signal, if it is in pass state. */
e5ef252a 1580
25289eb2 1581static int
8817a6f2 1582linux_nat_resume_callback (struct lwp_info *lp, void *except)
25289eb2 1583{
e5ef252a
PA
1584 enum gdb_signal signo = GDB_SIGNAL_0;
1585
8817a6f2
PA
1586 if (lp == except)
1587 return 0;
1588
e5ef252a
PA
1589 if (lp->stopped)
1590 {
1591 struct thread_info *thread;
1592
1593 thread = find_thread_ptid (lp->ptid);
1594 if (thread != NULL)
1595 {
70509625 1596 signo = thread->suspend.stop_signal;
e5ef252a
PA
1597 thread->suspend.stop_signal = GDB_SIGNAL_0;
1598 }
1599 }
1600
1601 resume_lwp (lp, 0, signo);
d6b0e80f
AC
1602 return 0;
1603}
1604
/* Callback for iterate_over_lwps: mark LP as not resumed, and record
   that the last resume request for it was a stop.  Returns 0.  */

static int
resume_clear_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 0;
  lp->last_resume_kind = resume_stop;
  return 0;
}
1612
/* Callback for iterate_over_lwps: mark LP as resumed, and record that
   the last resume request for it was a continue.  Returns 0.  */

static int
resume_set_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 1;
  lp->last_resume_kind = resume_continue;
  return 0;
}
1620
/* target_ops resume hook: resume (or single-step, if STEP) the LWPs
   matching PTID, delivering SIGNO to the event thread.  */

static void
linux_nat_resume (struct target_ops *ops,
		  ptid_t ptid, int step, enum gdb_signal signo)
{
  struct lwp_info *lp;
  int resume_many;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
			step ? "step" : "resume",
			target_pid_to_str (ptid),
			(signo != GDB_SIGNAL_0
			 ? strsignal (gdb_signal_to_host (signo)) : "0"),
			target_pid_to_str (inferior_ptid));

  /* A specific PTID means `step only this process id'.  */
  resume_many = (ptid_equal (minus_one_ptid, ptid)
		 || ptid_is_pid (ptid));

  /* Mark the lwps we're resuming as resumed.  */
  iterate_over_lwps (ptid, resume_set_callback, NULL);

  /* See if it's the current inferior that should be handled
     specially.  */
  if (resume_many)
    lp = find_lwp_pid (inferior_ptid);
  else
    lp = find_lwp_pid (ptid);
  gdb_assert (lp != NULL);

  /* Remember if we're stepping.  */
  lp->last_resume_kind = step ? resume_step : resume_continue;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  if (lp->status && WIFSTOPPED (lp->status))
    {
      /* A pending stop with a pass-state signal is re-delivered by
	 resuming with that signal rather than reported to the core.  */
      if (!lp->step
	  && WSTOPSIG (lp->status)
	  && sigismember (&pass_mask, WSTOPSIG (lp->status)))
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLR: Not short circuiting for ignored "
				"status 0x%x\n", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == GDB_SIGNAL_0);
	  signo = gdb_signal_from_host (WSTOPSIG (lp->status));
	  lp->status = 0;
	}
    }

  if (lwp_status_pending_p (lp))
    {
      /* FIXME: What should we do if we are supposed to continue
	 this thread with a signal?  */
      gdb_assert (signo == GDB_SIGNAL_0);

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLR: Short circuiting for status 0x%x\n",
			    lp->status);

      if (target_can_async_p ())
	{
	  target_async (inferior_event_handler, 0);
	  /* Tell the event loop we have something to process.  */
	  async_file_mark ();
	}
      return;
    }

  if (resume_many)
    iterate_over_lwps (ptid, linux_nat_resume_callback, lp);

  linux_resume_one_lwp (lp, step, signo);

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: %s %s, %s (resume event thread)\n",
			step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			target_pid_to_str (ptid),
			(signo != GDB_SIGNAL_0
			 ? strsignal (gdb_signal_to_host (signo)) : "0"));

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}
1718
/* Send a signal to an LWP.  */

static int
kill_lwp (int lwpid, int signo)
{
  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
     fails, then we are not using nptl threads and we should be using kill.  */

#ifdef HAVE_TKILL_SYSCALL
  static int tkill_unavailable;

  if (!tkill_unavailable)
    {
      int result;

      errno = 0;
      result = syscall (__NR_tkill, lwpid, signo);
      if (errno != ENOSYS)
	return result;
      /* The kernel does not implement tkill; remember that so we go
	 straight to kill from now on.  */
      tkill_unavailable = 1;
    }
#endif

  return kill (lwpid, signo);
}
1746
ca2163eb
PA
/* Handle a GNU/Linux syscall trap wait response.  If we see a syscall
   event, check if the core is interested in it: if not, ignore the
   event, and keep waiting; otherwise, we need to toggle the LWP's
   syscall entry/exit status, since the ptrace event itself doesn't
   indicate it, and report the trap to higher layers.

   Returns non-zero if the event was consumed (LP was resumed and the
   caller should keep waiting), zero if the event should be reported.  */

static int
linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
{
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
  int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);

  if (stopping)
    {
      /* If we're stopping threads, there's a SIGSTOP pending, which
	 makes it so that the LWP reports an immediate syscall return,
	 followed by the SIGSTOP.  Skip seeing that "return" using
	 PTRACE_CONT directly, and let stop_wait_callback collect the
	 SIGSTOP.  Later when the thread is resumed, a new syscall
	 entry event.  If we didn't do this (and returned 0), we'd
	 leave a syscall entry pending, and our caller, by using
	 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
	 itself.  Later, when the user re-resumes this LWP, we'd see
	 another syscall entry event and we'd mistake it for a return.

	 If stop_wait_callback didn't force the SIGSTOP out of the LWP
	 (leaving immediately with LWP->signalled set, without issuing
	 a PTRACE_CONT), it would still be problematic to leave this
	 syscall enter pending, as later when the thread is resumed,
	 it would then see the same syscall exit mentioned above,
	 followed by the delayed SIGSTOP, while the syscall didn't
	 actually get to execute.  It seems it would be even more
	 confusing to the user.  */

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: ignoring syscall %d "
			    "for LWP %ld (stopping threads), "
			    "resuming with PTRACE_CONT for SIGSTOP\n",
			    syscall_number,
			    ptid_get_lwp (lp->ptid));

      lp->syscall_state = TARGET_WAITKIND_IGNORE;
      ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
      lp->stopped = 0;
      return 1;
    }

  if (catch_syscall_enabled ())
    {
      /* Always update the entry/return state, even if this particular
	 syscall isn't interesting to the core now.  In async mode,
	 the user could install a new catchpoint for this syscall
	 between syscall enter/return, and we'll need to know to
	 report a syscall return if that happens.  */
      lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
			   ? TARGET_WAITKIND_SYSCALL_RETURN
			   : TARGET_WAITKIND_SYSCALL_ENTRY);

      if (catching_syscall_number (syscall_number))
	{
	  /* Alright, an event to report.  */
	  ourstatus->kind = lp->syscall_state;
	  ourstatus->value.syscall_number = syscall_number;

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHST: stopping for %s of syscall %d"
				" for LWP %ld\n",
				lp->syscall_state
				== TARGET_WAITKIND_SYSCALL_ENTRY
				? "entry" : "return",
				syscall_number,
				ptid_get_lwp (lp->ptid));
	  return 0;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: ignoring %s of syscall %d "
			    "for LWP %ld\n",
			    lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
			    ? "entry" : "return",
			    syscall_number,
			    ptid_get_lwp (lp->ptid));
    }
  else
    {
      /* If we had been syscall tracing, and hence used PT_SYSCALL
	 before on this LWP, it could happen that the user removes all
	 syscall catchpoints before we get to process this event.
	 There are two noteworthy issues here:

	 - When stopped at a syscall entry event, resuming with
	   PT_STEP still resumes executing the syscall and reports a
	   syscall return.

	 - Only PT_SYSCALL catches syscall enters.  If we last
	   single-stepped this thread, then this event can't be a
	   syscall enter.  If we last single-stepped this thread, this
	   has to be a syscall exit.

	 The points above mean that the next resume, be it PT_STEP or
	 PT_CONTINUE, can not trigger a syscall trace event.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: caught syscall event "
			    "with no syscall catchpoints."
			    " %d for LWP %ld, ignoring\n",
			    syscall_number,
			    ptid_get_lwp (lp->ptid));
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* The core isn't interested in this event.  For efficiency, avoid
     stopping all threads only to have the core resume them all again.
     Since we're not stopping threads, if we're still syscall tracing
     and not stepping, we can't use PTRACE_CONT here, as we'd miss any
     subsequent syscall.  Simply resume using the inf-ptrace layer,
     which knows when to use PT_SYSCALL or PT_CONTINUE.  */

  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
  return 1;
}
1872
3d799a95
DJ
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  This function returns non-zero if the
   event should be ignored and we should wait again.  If STOPPING is
   true, the new LWP remains stopped, otherwise it is continued.

   LP is the LWP that reported the extended SIGTRAP stop and STATUS is
   its raw wait status.  Events that must be reported to the core are
   recorded in LP->waitstatus (aliased as OURSTATUS below).  */

static int
linux_handle_extended_wait (struct lwp_info *lp, int status,
			    int stopping)
{
  int pid = ptid_get_lwp (lp->ptid);
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  /* Decode the PTRACE_EVENT_* code out of the raw wait status.  */
  int event = linux_ptrace_get_extended_event (status);

  if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
      || event == PTRACE_EVENT_CLONE)
    {
      unsigned long new_pid;
      int ret;

      /* The new child's PID is carried in the ptrace event message.  */
      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */
	  ret = my_waitpid (new_pid, &status,
			    (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
	  if (ret == -1)
	    perror_with_name (_("waiting for new child"));
	  else if (ret != new_pid)
	    internal_error (__FILE__, __LINE__,
			    _("wait returned unexpected PID %d"), ret);
	  else if (!WIFSTOPPED (status))
	    internal_error (__FILE__, __LINE__,
			    _("wait returned unexpected status 0x%x"), status);
	}

      ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  /* The arch-specific native code may need to know about new
	     forks even if those end up never mapped to an
	     inferior.  */
	  if (linux_nat_new_fork != NULL)
	    linux_nat_new_fork (lp, new_pid);
	}

      if (event == PTRACE_EVENT_FORK
	  && linux_fork_checkpointing_p (ptid_get_pid (lp->ptid)))
	{
	  /* Handle checkpointing by linux-fork.c here as a special
	     case.  We don't want the follow-fork-mode or 'catch fork'
	     to interfere with this.  */

	  /* This won't actually modify the breakpoint list, but will
	     physically remove the breakpoints from the child.  */
	  detach_breakpoints (ptid_build (new_pid, new_pid, 0));

	  /* Retain child fork in ptrace (stopped) state.  */
	  if (!find_fork_pid (new_pid))
	    add_fork (new_pid);

	  /* Report as spurious, so that infrun doesn't want to follow
	     this fork.  We're actually doing an infcall in
	     linux-fork.c.  */
	  ourstatus->kind = TARGET_WAITKIND_SPURIOUS;

	  /* Report the stop to the core.  */
	  return 0;
	}

      if (event == PTRACE_EVENT_FORK)
	ourstatus->kind = TARGET_WAITKIND_FORKED;
      else if (event == PTRACE_EVENT_VFORK)
	ourstatus->kind = TARGET_WAITKIND_VFORKED;
      else
	{
	  /* PTRACE_EVENT_CLONE: a new thread in the same process.
	     Track it internally; the event itself is not reported to
	     the core.  */
	  struct lwp_info *new_lp;

	  ourstatus->kind = TARGET_WAITKIND_IGNORE;

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: Got clone event "
				"from LWP %d, new child is LWP %ld\n",
				pid, new_pid);

	  new_lp = add_lwp (ptid_build (ptid_get_pid (lp->ptid), new_pid, 0));
	  new_lp->cloned = 1;
	  new_lp->stopped = 1;

	  if (WSTOPSIG (status) != SIGSTOP)
	    {
	      /* This can happen if someone starts sending signals to
		 the new thread before it gets a chance to run, which
		 have a lower number than SIGSTOP (e.g. SIGUSR1).
		 This is an unlikely case, and harder to handle for
		 fork / vfork than for clone, so we do not try - but
		 we handle it for clone events here.  We'll send
		 the other signal on to the thread below.  */

	      new_lp->signalled = 1;
	    }
	  else
	    {
	      struct thread_info *tp;

	      /* When we stop for an event in some other thread, and
		 pull the thread list just as this thread has cloned,
		 we'll have seen the new thread in the thread_db list
		 before handling the CLONE event (glibc's
		 pthread_create adds the new thread to the thread list
		 before clone'ing, and has the kernel fill in the
		 thread's tid on the clone call with
		 CLONE_PARENT_SETTID).  If that happened, and the core
		 had requested the new thread to stop, we'll have
		 killed it with SIGSTOP.  But since SIGSTOP is not an
		 RT signal, it can only be queued once.  We need to be
		 careful to not resume the LWP if we wanted it to
		 stop.  In that case, we'll leave the SIGSTOP pending.
		 It will later be reported as GDB_SIGNAL_0.  */
	      tp = find_thread_ptid (new_lp->ptid);
	      if (tp != NULL && tp->stop_requested)
		new_lp->last_resume_kind = resume_stop;
	      else
		status = 0;
	    }

	  if (non_stop)
	    {
	      /* Add the new thread to GDB's lists as soon as possible
		 so that:

		 1) the frontend doesn't have to wait for a stop to
		 display them, and,

		 2) we tag it with the correct running state.  */

	      /* If the thread_db layer is active, let it know about
		 this new thread, and add it to GDB's list.  */
	      if (!thread_db_attach_lwp (new_lp->ptid))
		{
		  /* We're not using thread_db.  Add it to GDB's
		     list.  */
		  target_post_attach (ptid_get_lwp (new_lp->ptid));
		  add_thread (new_lp->ptid);
		}

	      if (!stopping)
		{
		  set_running (new_lp->ptid, 1);
		  set_executing (new_lp->ptid, 1);
		  /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
		     resume_stop.  */
		  new_lp->last_resume_kind = resume_continue;
		}
	    }

	  if (status != 0)
	    {
	      /* We created NEW_LP so it cannot yet contain STATUS.  */
	      gdb_assert (new_lp->status == 0);

	      /* Save the wait status to report later.  */
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LHEW: waitpid of new LWP %ld, "
				    "saving status %s\n",
				    (long) ptid_get_lwp (new_lp->ptid),
				    status_to_str (status));
	      new_lp->status = status;
	    }

	  /* Note the need to use the low target ops to resume, to
	     handle resuming with PT_SYSCALL if we have syscall
	     catchpoints.  */
	  if (!stopping)
	    {
	      new_lp->resumed = 1;

	      if (status == 0)
		{
		  gdb_assert (new_lp->last_resume_kind == resume_continue);
		  if (debug_linux_nat)
		    fprintf_unfiltered (gdb_stdlog,
					"LHEW: resuming new LWP %ld\n",
					ptid_get_lwp (new_lp->ptid));
		  linux_resume_one_lwp (new_lp, 0, GDB_SIGNAL_0);
		}
	    }

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: resuming parent LWP %d\n", pid);
	  linux_resume_one_lwp (lp, 0, GDB_SIGNAL_0);
	  return 1;
	}

      return 0;
    }

  if (event == PTRACE_EVENT_EXEC)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHEW: Got exec event from LWP %ld\n",
			    ptid_get_lwp (lp->ptid));

      ourstatus->kind = TARGET_WAITKIND_EXECD;
      ourstatus->value.execd_pathname
	= xstrdup (linux_child_pid_to_exec_file (NULL, pid));

      /* The thread that execed must have been resumed, but, when a
	 thread execs, it changes its tid to the tgid, and the old
	 tgid thread might have not been resumed.  */
      lp->resumed = 1;
      return 0;
    }

  if (event == PTRACE_EVENT_VFORK_DONE)
    {
      if (current_inferior ()->waiting_for_vfork_done)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: Got expected PTRACE_EVENT_"
				"VFORK_DONE from LWP %ld: stopping\n",
				ptid_get_lwp (lp->ptid));

	  ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
	  return 0;
	}

      /* Nobody is waiting for this vfork to finish; just keep the
	 LWP going.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHEW: Got PTRACE_EVENT_VFORK_DONE "
			    "from LWP %ld: resuming\n",
			    ptid_get_lwp (lp->ptid));
      ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
      return 1;
    }

  internal_error (__FILE__, __LINE__,
		  _("unknown ptrace event %d"), event);
}
2121
/* Wait for LP to stop.  Returns the wait status, or 0 if the LWP has
   exited.  On return with a non-zero status, LP->stopped has been
   set.  May recurse (via linux_handle_syscall_trap /
   linux_handle_extended_wait) if the stop was an internal event that
   should not be surfaced.  */

static int
wait_lwp (struct lwp_info *lp)
{
  pid_t pid;
  int status = 0;
  int thread_dead = 0;
  sigset_t prev_mask;

  gdb_assert (!lp->stopped);
  gdb_assert (lp->status == 0);

  /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below.  */
  block_child_signals (&prev_mask);

  for (;;)
    {
      /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
	 was right and we should just call sigsuspend.  */

      /* Try the non-clone flavor first, then the clone flavor;
	 ECHILD from the first attempt just means "wrong kind".  */
      pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, WNOHANG);
      if (pid == -1 && errno == ECHILD)
	pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, __WCLONE | WNOHANG);
      if (pid == -1 && errno == ECHILD)
	{
	  /* The thread has previously exited.  We need to delete it
	     now because, for some vendor 2.4 kernels with NPTL
	     support backported, there won't be an exit event unless
	     it is the main thread.  2.6 kernels will report an exit
	     event for each thread that exits, as expected.  */
	  thread_dead = 1;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
				target_pid_to_str (lp->ptid));
	}
      if (pid != 0)
	break;

      /* Bugs 10970, 12702.
	 Thread group leader may have exited in which case we'll lock up in
	 waitpid if there are other threads, even if they are all zombies too.
	 Basically, we're not supposed to use waitpid this way.
	 __WCLONE is not applicable for the leader so we can't use that.
	 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
	 process; it gets ESRCH both for the zombie and for running processes.

	 As a workaround, check if we're waiting for the thread group leader and
	 if it's a zombie, and avoid calling waitpid if it is.

	 This is racy, what if the tgl becomes a zombie right after we check?
	 Therefore always use WNOHANG with sigsuspend - it is equivalent to
	 waiting waitpid but linux_proc_pid_is_zombie is safe this way.  */

      if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid)
	  && linux_proc_pid_is_zombie (ptid_get_lwp (lp->ptid)))
	{
	  thread_dead = 1;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"WL: Thread group leader %s vanished.\n",
				target_pid_to_str (lp->ptid));
	  break;
	}

      /* Wait for next SIGCHLD and try again.  This may let SIGCHLD handlers
	 get invoked despite our caller had them intentionally blocked by
	 block_child_signals.  This is sensitive only to the loop of
	 linux_nat_wait_1 and there if we get called my_waitpid gets called
	 again before it gets to sigsuspend so we can safely let the handlers
	 get executed here.  */

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog, "WL: about to sigsuspend\n");
      sigsuspend (&suspend_mask);
    }

  restore_child_signals_mask (&prev_mask);

  if (!thread_dead)
    {
      gdb_assert (pid == ptid_get_lwp (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "WL: waitpid %s received %s\n",
			      target_pid_to_str (lp->ptid),
			      status_to_str (status));
	}

      /* Check if the thread has exited.  */
      if (WIFEXITED (status) || WIFSIGNALED (status))
	{
	  thread_dead = 1;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
				target_pid_to_str (lp->ptid));
	}
    }

  if (thread_dead)
    {
      /* Delete the LWP from our bookkeeping; callers see 0.  */
      exit_lwp (lp);
      return 0;
    }

  gdb_assert (WIFSTOPPED (status));
  lp->stopped = 1;

  /* Deferred PTRACE_SETOPTIONS: now that the LWP is known stopped we
     can safely enable event reporting on it.  */
  if (lp->must_set_ptrace_flags)
    {
      struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));

      linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
      lp->must_set_ptrace_flags = 0;
    }

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 1))
	return wait_lwp (lp);
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && linux_is_extended_waitstatus (status))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "WL: Handling extended status 0x%06x\n",
			    status);
      if (linux_handle_extended_wait (lp, status, 1))
	return wait_lwp (lp);
    }

  return status;
}
2267
2268/* Send a SIGSTOP to LP. */
2269
2270static int
2271stop_callback (struct lwp_info *lp, void *data)
2272{
2273 if (!lp->stopped && !lp->signalled)
2274 {
2275 int ret;
2276
2277 if (debug_linux_nat)
2278 {
2279 fprintf_unfiltered (gdb_stdlog,
2280 "SC: kill %s **<SIGSTOP>**\n",
2281 target_pid_to_str (lp->ptid));
2282 }
2283 errno = 0;
dfd4cc63 2284 ret = kill_lwp (ptid_get_lwp (lp->ptid), SIGSTOP);
d6b0e80f
AC
2285 if (debug_linux_nat)
2286 {
2287 fprintf_unfiltered (gdb_stdlog,
2288 "SC: lwp kill %d %s\n",
2289 ret,
2290 errno ? safe_strerror (errno) : "ERRNO-OK");
2291 }
2292
2293 lp->signalled = 1;
2294 gdb_assert (lp->status == 0);
2295 }
2296
2297 return 0;
2298}
2299
7b50312a
PA
2300/* Request a stop on LWP. */
2301
2302void
2303linux_stop_lwp (struct lwp_info *lwp)
2304{
2305 stop_callback (lwp, NULL);
2306}
2307
57380f4e 2308/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2309
2310static int
57380f4e
DJ
2311linux_nat_has_pending_sigint (int pid)
2312{
2313 sigset_t pending, blocked, ignored;
57380f4e
DJ
2314
2315 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2316
2317 if (sigismember (&pending, SIGINT)
2318 && !sigismember (&ignored, SIGINT))
2319 return 1;
2320
2321 return 0;
2322}
2323
2324/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2325
2326static int
2327set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2328{
57380f4e
DJ
2329 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2330 flag to consume the next one. */
2331 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2332 && WSTOPSIG (lp->status) == SIGINT)
2333 lp->status = 0;
2334 else
2335 lp->ignore_sigint = 1;
2336
2337 return 0;
2338}
2339
2340/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2341 This function is called after we know the LWP has stopped; if the LWP
2342 stopped before the expected SIGINT was delivered, then it will never have
2343 arrived. Also, if the signal was delivered to a shared queue and consumed
2344 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2345
57380f4e
DJ
2346static void
2347maybe_clear_ignore_sigint (struct lwp_info *lp)
2348{
2349 if (!lp->ignore_sigint)
2350 return;
2351
dfd4cc63 2352 if (!linux_nat_has_pending_sigint (ptid_get_lwp (lp->ptid)))
57380f4e
DJ
2353 {
2354 if (debug_linux_nat)
2355 fprintf_unfiltered (gdb_stdlog,
2356 "MCIS: Clearing bogus flag for %s\n",
2357 target_pid_to_str (lp->ptid));
2358 lp->ignore_sigint = 0;
2359 }
2360}
2361
ebec9a0f
PA
2362/* Fetch the possible triggered data watchpoint info and store it in
2363 LP.
2364
2365 On some archs, like x86, that use debug registers to set
2366 watchpoints, it's possible that the way to know which watched
2367 address trapped, is to check the register that is used to select
2368 which address to watch. Problem is, between setting the watchpoint
2369 and reading back which data address trapped, the user may change
2370 the set of watchpoints, and, as a consequence, GDB changes the
2371 debug registers in the inferior. To avoid reading back a stale
2372 stopped-data-address when that happens, we cache in LP the fact
2373 that a watchpoint trapped, and the corresponding data address, as
2374 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2375 registers meanwhile, we have the cached data we can rely on. */
2376
2377static void
2378save_sigtrap (struct lwp_info *lp)
2379{
2380 struct cleanup *old_chain;
2381
2382 if (linux_ops->to_stopped_by_watchpoint == NULL)
2383 {
2384 lp->stopped_by_watchpoint = 0;
2385 return;
2386 }
2387
2388 old_chain = save_inferior_ptid ();
2389 inferior_ptid = lp->ptid;
2390
6a109b6b 2391 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint (linux_ops);
ebec9a0f
PA
2392
2393 if (lp->stopped_by_watchpoint)
2394 {
2395 if (linux_ops->to_stopped_data_address != NULL)
2396 lp->stopped_data_address_p =
2397 linux_ops->to_stopped_data_address (&current_target,
2398 &lp->stopped_data_address);
2399 else
2400 lp->stopped_data_address_p = 0;
2401 }
2402
2403 do_cleanups (old_chain);
2404}
2405
2406/* See save_sigtrap. */
2407
2408static int
6a109b6b 2409linux_nat_stopped_by_watchpoint (struct target_ops *ops)
ebec9a0f
PA
2410{
2411 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2412
2413 gdb_assert (lp != NULL);
2414
2415 return lp->stopped_by_watchpoint;
2416}
2417
2418static int
2419linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2420{
2421 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2422
2423 gdb_assert (lp != NULL);
2424
2425 *addr_p = lp->stopped_data_address;
2426
2427 return lp->stopped_data_address_p;
2428}
2429
26ab7092
JK
/* Commonly any breakpoint / watchpoint generate only SIGTRAP.  */

static int
sigtrap_is_event (int status)
{
  /* True iff STATUS means "stopped by SIGTRAP".  */
  if (!WIFSTOPPED (status))
    return 0;
  return WSTOPSIG (status) == SIGTRAP;
}

/* SIGTRAP-like events recognizer.  */

static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2441
00390b84
JK
2442/* Check for SIGTRAP-like events in LP. */
2443
2444static int
2445linux_nat_lp_status_is_event (struct lwp_info *lp)
2446{
2447 /* We check for lp->waitstatus in addition to lp->status, because we can
2448 have pending process exits recorded in lp->status
2449 and W_EXITCODE(0,0) == 0. We should probably have an additional
2450 lp->status_p flag. */
2451
2452 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2453 && linux_nat_status_is_event (lp->status));
2454}
2455
26ab7092
JK
2456/* Set alternative SIGTRAP-like events recognizer. If
2457 breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
2458 applied. */
2459
2460void
2461linux_nat_set_status_is_event (struct target_ops *t,
2462 int (*status_is_event) (int status))
2463{
2464 linux_nat_status_is_event = status_is_event;
2465}
2466
57380f4e
DJ
/* Wait until LP is stopped.  Iterator callback; DATA is unused.
   Leaves any non-SIGSTOP stop pending in LP->status, discards a
   SIGINT that set_ignore_sigint asked us to swallow, and consumes
   the expected delayed SIGSTOP silently.  */

static int
stop_wait_callback (struct lwp_info *lp, void *data)
{
  struct inferior *inf = find_inferior_ptid (lp->ptid);

  /* If this is a vfork parent, bail out, it is not going to report
     any SIGSTOP until the vfork is done with.  */
  if (inf->vfork_child != NULL)
    return 0;

  if (!lp->stopped)
    {
      int status;

      status = wait_lwp (lp);
      /* 0 means the LWP exited; nothing more to wait for.  */
      if (status == 0)
	return 0;

      if (lp->ignore_sigint && WIFSTOPPED (status)
	  && WSTOPSIG (status) == SIGINT)
	{
	  lp->ignore_sigint = 0;

	  /* Discard the SIGINT, re-run the LWP, and recurse to keep
	     waiting for the stop we actually wanted.  */
	  errno = 0;
	  ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
	  lp->stopped = 0;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"PTRACE_CONT %s, 0, 0 (%s) "
				"(discarding SIGINT)\n",
				target_pid_to_str (lp->ptid),
				errno ? safe_strerror (errno) : "OK");

	  return stop_wait_callback (lp, NULL);
	}

      maybe_clear_ignore_sigint (lp);

      if (WSTOPSIG (status) != SIGSTOP)
	{
	  /* The thread was stopped with a signal other than SIGSTOP.  */

	  save_sigtrap (lp);

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"SWC: Pending event %s in %s\n",
				status_to_str ((int) status),
				target_pid_to_str (lp->ptid));

	  /* Save the sigtrap event.  */
	  lp->status = status;
	  gdb_assert (lp->signalled);
	}
      else
	{
	  /* We caught the SIGSTOP that we intended to catch, so
	     there's no SIGSTOP pending.  */

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"SWC: Delayed SIGSTOP caught for %s.\n",
				target_pid_to_str (lp->ptid));

	  /* Reset SIGNALLED only after the stop_wait_callback call
	     above as it does gdb_assert on SIGNALLED.  */
	  lp->signalled = 0;
	}
    }

  return 0;
}
2541
d6b0e80f
AC
2542/* Return non-zero if LP has a wait status pending. */
2543
2544static int
2545status_callback (struct lwp_info *lp, void *data)
2546{
2547 /* Only report a pending wait status if we pretend that this has
2548 indeed been resumed. */
ca2163eb
PA
2549 if (!lp->resumed)
2550 return 0;
2551
8a99810d 2552 return lwp_status_pending_p (lp);
d6b0e80f
AC
2553}
2554
2555/* Return non-zero if LP isn't stopped. */
2556
2557static int
2558running_callback (struct lwp_info *lp, void *data)
2559{
25289eb2 2560 return (!lp->stopped
8a99810d 2561 || (lwp_status_pending_p (lp) && lp->resumed));
d6b0e80f
AC
2562}
2563
2564/* Count the LWP's that have had events. */
2565
2566static int
2567count_events_callback (struct lwp_info *lp, void *data)
2568{
2569 int *count = data;
2570
2571 gdb_assert (count != NULL);
2572
e09490f1 2573 /* Count only resumed LWPs that have a SIGTRAP event pending. */
00390b84 2574 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
2575 (*count)++;
2576
2577 return 0;
2578}
2579
2580/* Select the LWP (if any) that is currently being single-stepped. */
2581
2582static int
2583select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2584{
25289eb2
PA
2585 if (lp->last_resume_kind == resume_step
2586 && lp->status != 0)
d6b0e80f
AC
2587 return 1;
2588 else
2589 return 0;
2590}
2591
8a99810d
PA
2592/* Returns true if LP has a status pending. */
2593
2594static int
2595lwp_status_pending_p (struct lwp_info *lp)
2596{
2597 /* We check for lp->waitstatus in addition to lp->status, because we
2598 can have pending process exits recorded in lp->status and
2599 W_EXITCODE(0,0) happens to be 0. */
2600 return lp->status != 0 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE;
2601}
2602
d6b0e80f
AC
2603/* Select the Nth LWP that has had a SIGTRAP event. */
2604
2605static int
2606select_event_lwp_callback (struct lwp_info *lp, void *data)
2607{
2608 int *selector = data;
2609
2610 gdb_assert (selector != NULL);
2611
1777feb0 2612 /* Select only resumed LWPs that have a SIGTRAP event pending. */
00390b84 2613 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
2614 if ((*selector)-- == 0)
2615 return 1;
2616
2617 return 0;
2618}
2619
710151dd
PA
2620static int
2621cancel_breakpoint (struct lwp_info *lp)
2622{
2623 /* Arrange for a breakpoint to be hit again later. We don't keep
2624 the SIGTRAP status and don't forward the SIGTRAP signal to the
2625 LWP. We will handle the current event, eventually we will resume
2626 this LWP, and this breakpoint will trap again.
2627
2628 If we do not do this, then we run the risk that the user will
2629 delete or disable the breakpoint, but the LWP will have already
2630 tripped on it. */
2631
515630c5
UW
2632 struct regcache *regcache = get_thread_regcache (lp->ptid);
2633 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2634 CORE_ADDR pc;
2635
118e6252 2636 pc = regcache_read_pc (regcache) - target_decr_pc_after_break (gdbarch);
6c95b8df 2637 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
710151dd
PA
2638 {
2639 if (debug_linux_nat)
2640 fprintf_unfiltered (gdb_stdlog,
2641 "CB: Push back breakpoint for %s\n",
2642 target_pid_to_str (lp->ptid));
2643
2644 /* Back up the PC if necessary. */
118e6252 2645 if (target_decr_pc_after_break (gdbarch))
515630c5
UW
2646 regcache_write_pc (regcache, pc);
2647
710151dd
PA
2648 return 1;
2649 }
2650 return 0;
2651}
2652
d6b0e80f
AC
2653static int
2654cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2655{
2656 struct lwp_info *event_lp = data;
2657
2658 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2659 if (lp == event_lp)
2660 return 0;
2661
2662 /* If a LWP other than the LWP that we're reporting an event for has
2663 hit a GDB breakpoint (as opposed to some random trap signal),
2664 then just arrange for it to hit it again later. We don't keep
2665 the SIGTRAP status and don't forward the SIGTRAP signal to the
2666 LWP. We will handle the current event, eventually we will resume
2667 all LWPs, and this one will get its breakpoint trap again.
2668
2669 If we do not do this, then we run the risk that the user will
2670 delete or disable the breakpoint, but the LWP will have already
2671 tripped on it. */
2672
00390b84 2673 if (linux_nat_lp_status_is_event (lp)
710151dd
PA
2674 && cancel_breakpoint (lp))
2675 /* Throw away the SIGTRAP. */
2676 lp->status = 0;
d6b0e80f
AC
2677
2678 return 0;
2679}
2680
/* Select one LWP out of those that have events pending.

   FILTER restricts which LWPs are considered.  On entry *ORIG_LP is
   the LWP whose event we happened to see first and *STATUS its wait
   status; on return they are switched to the elected LWP (possibly
   unchanged), with the elected LWP's pending status flushed.
   Single-stepping LWPs are preferred; otherwise one LWP with a
   pending SIGTRAP is chosen at random, to avoid starving threads.  */

static void
select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp;

  /* Record the wait status for the original LWP.  */
  (*orig_lp)->status = *status;

  /* Give preference to any LWP that is being single-stepped.  */
  event_lp = iterate_over_lwps (filter,
				select_singlestep_lwp_callback, NULL);
  if (event_lp != NULL)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "SEL: Select single-step %s\n",
			    target_pid_to_str (event_lp->ptid));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had SIGTRAP events.  */

      /* First see how many SIGTRAP events we have.  */
      iterate_over_lwps (filter, count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
      random_selector = (int)
	((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_linux_nat && num_events > 1)
	fprintf_unfiltered (gdb_stdlog,
			    "SEL: Found %d SIGTRAP events, selecting #%d\n",
			    num_events, random_selector);

      event_lp = iterate_over_lwps (filter,
				    select_event_lwp_callback,
				    &random_selector);
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
      *status = event_lp->status;
    }

  /* Flush the wait status for the event LWP.  */
  (*orig_lp)->status = 0;
}
2735
2736/* Return non-zero if LP has been resumed. */
2737
2738static int
2739resumed_callback (struct lwp_info *lp, void *data)
2740{
2741 return lp->resumed;
2742}
2743
12d9289a
PA
2744/* Stop an active thread, verify it still exists, then resume it. If
2745 the thread ends up with a pending status, then it is not resumed,
2746 and *DATA (really a pointer to int), is set. */
d6b0e80f
AC
2747
2748static int
2749stop_and_resume_callback (struct lwp_info *lp, void *data)
2750{
12d9289a
PA
2751 int *new_pending_p = data;
2752
25289eb2 2753 if (!lp->stopped)
d6b0e80f 2754 {
25289eb2
PA
2755 ptid_t ptid = lp->ptid;
2756
d6b0e80f
AC
2757 stop_callback (lp, NULL);
2758 stop_wait_callback (lp, NULL);
25289eb2
PA
2759
2760 /* Resume if the lwp still exists, and the core wanted it
2761 running. */
12d9289a
PA
2762 lp = find_lwp_pid (ptid);
2763 if (lp != NULL)
25289eb2 2764 {
12d9289a 2765 if (lp->last_resume_kind == resume_stop
8a99810d 2766 && !lwp_status_pending_p (lp))
12d9289a
PA
2767 {
2768 /* The core wanted the LWP to stop. Even if it stopped
2769 cleanly (with SIGSTOP), leave the event pending. */
2770 if (debug_linux_nat)
2771 fprintf_unfiltered (gdb_stdlog,
2772 "SARC: core wanted LWP %ld stopped "
2773 "(leaving SIGSTOP pending)\n",
dfd4cc63 2774 ptid_get_lwp (lp->ptid));
12d9289a
PA
2775 lp->status = W_STOPCODE (SIGSTOP);
2776 }
2777
8a99810d 2778 if (!lwp_status_pending_p (lp))
12d9289a
PA
2779 {
2780 if (debug_linux_nat)
2781 fprintf_unfiltered (gdb_stdlog,
2782 "SARC: re-resuming LWP %ld\n",
dfd4cc63 2783 ptid_get_lwp (lp->ptid));
e5ef252a 2784 resume_lwp (lp, lp->step, GDB_SIGNAL_0);
12d9289a
PA
2785 }
2786 else
2787 {
2788 if (debug_linux_nat)
2789 fprintf_unfiltered (gdb_stdlog,
2790 "SARC: not re-resuming LWP %ld "
2791 "(has pending)\n",
dfd4cc63 2792 ptid_get_lwp (lp->ptid));
12d9289a
PA
2793 if (new_pending_p)
2794 *new_pending_p = 1;
2795 }
25289eb2 2796 }
d6b0e80f
AC
2797 }
2798 return 0;
2799}
2800
/* Check if we should go on and pass this event to common code.
   Return the affected lwp if we are, or NULL otherwise.  If we stop
   all lwps temporarily, we may end up with new pending events in some
   other lwp.  In that case set *NEW_PENDING_P to true.

   LWPID/STATUS are the raw waitpid results.  NOTE(review): STATUS may
   be rewritten below (syscall SIGTRAPs are canonicalized to plain
   SIGTRAP), so the value finally stored in LP->STATUS is not always
   the one waitpid returned.  */

static struct lwp_info *
linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
{
  struct lwp_info *lp;
  /* Extended ptrace event (PTRACE_EVENT_*) encoded in the high bits
     of STATUS, if any.  */
  int event = linux_ptrace_get_extended_event (status);

  *new_pending_p = 0;

  lp = find_lwp_pid (pid_to_ptid (lwpid));

  /* Check for stop events reported by a process we didn't already
     know about - anything not already in our LWP list.

     If we're expecting to receive stopped processes after
     fork, vfork, and clone events, then we'll just add the
     new one to our list and go back to waiting for the event
     to be reported - the stopped process might be returned
     from waitpid before or after the event is.

     But note the case of a non-leader thread exec'ing after the
     leader having exited, and gone from our lists.  The non-leader
     thread changes its tid to the tgid.  */

  if (WIFSTOPPED (status) && lp == NULL
      && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
    {
      /* A multi-thread exec after we had seen the leader exiting.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Re-adding thread group leader LWP %d.\n",
			    lwpid);

      lp = add_lwp (ptid_build (lwpid, lwpid, 0));
      lp->stopped = 1;
      lp->resumed = 1;
      add_thread (lp->ptid);
    }

  if (WIFSTOPPED (status) && !lp)
    {
      /* An unknown LWP stopped: remember the status so it can be
	 consumed when the corresponding fork/vfork/clone event
	 arrives.  */
      add_to_pid_list (&stopped_pids, lwpid, status);
      return NULL;
    }

  /* Make sure we don't report an event for the exit of an LWP not in
     our list, i.e. not part of the current process.  This can happen
     if we detach from a program we originally forked and then it
     exits.  */
  if (!WIFSTOPPED (status) && !lp)
    return NULL;

  /* This LWP is stopped now.  (And if dead, this prevents it from
     ever being continued.)  */
  lp->stopped = 1;

  if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
    {
      /* First stop after attach/clone: now that the LWP is stopped we
	 can finally set the ptrace event-reporting options on it.  */
      struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));

      linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
      lp->must_set_ptrace_flags = 0;
    }

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 0))
	return NULL;
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && linux_is_extended_waitstatus (status))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Handling extended status 0x%06x\n",
			    status);
      if (linux_handle_extended_wait (lp, status, 0))
	return NULL;
    }

  if (linux_nat_status_is_event (status))
    save_sigtrap (lp);

  /* Check if the thread has exited.  */
  if ((WIFEXITED (status) || WIFSIGNALED (status))
      && num_lwps (ptid_get_pid (lp->ptid)) > 1)
    {
      /* If this is the main thread, we must stop all threads and verify
	 if they are still alive.  This is because in the nptl thread model
	 on Linux 2.4, there is no signal issued for exiting LWPs
	 other than the main thread.  We only get the main thread exit
	 signal once all child threads have already exited.  If we
	 stop all the threads and use the stop_wait_callback to check
	 if they have exited we can determine whether this signal
	 should be ignored or whether it means the end of the debugged
	 application, regardless of which threading model is being
	 used.  */
      if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
	{
	  iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
			     stop_and_resume_callback, new_pending_p);
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s exited.\n",
			    target_pid_to_str (lp->ptid));

      if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
	{
	  /* If there is at least one more LWP, then the exit signal
	     was not the end of the debugged application and should be
	     ignored.  */
	  exit_lwp (lp);
	  return NULL;
	}
    }

  /* Check if the current LWP has previously exited.  In the nptl
     thread model, LWPs other than the main thread do not issue
     signals when they exit so we must check whenever the thread has
     stopped.  A similar check is made in stop_wait_callback().  */
  if (num_lwps (ptid_get_pid (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
    {
      ptid_t ptid = pid_to_ptid (ptid_get_pid (lp->ptid));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s exited.\n",
			    target_pid_to_str (lp->ptid));

      exit_lwp (lp);

      /* Make sure there is at least one thread running.  */
      gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));

      /* Discard the event.  */
      return NULL;
    }

  /* Make sure we don't report a SIGSTOP that we sent ourselves in
     an attempt to stop an LWP.  */
  if (lp->signalled
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Delayed SIGSTOP caught for %s.\n",
			    target_pid_to_str (lp->ptid));

      lp->signalled = 0;

      if (lp->last_resume_kind != resume_stop)
	{
	  /* This is a delayed SIGSTOP.  */

	  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
				lp->step ?
				"PTRACE_SINGLESTEP" : "PTRACE_CONT",
				target_pid_to_str (lp->ptid));

	  gdb_assert (lp->resumed);

	  /* Discard the event.  */
	  return NULL;
	}
    }

  /* Make sure we don't report a SIGINT that we have already displayed
     for another thread.  */
  if (lp->ignore_sigint
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Delayed SIGINT caught for %s.\n",
			    target_pid_to_str (lp->ptid));

      /* This is a delayed SIGINT.  */
      lp->ignore_sigint = 0;

      linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s %s, 0, 0 (discard SIGINT)\n",
			    lp->step ?
			    "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			    target_pid_to_str (lp->ptid));
      gdb_assert (lp->resumed);

      /* Discard the event.  */
      return NULL;
    }

  /* An interesting event.  */
  gdb_assert (lp);
  lp->status = status;
  return lp;
}
3015
0e5bf2a8
PA
3016/* Detect zombie thread group leaders, and "exit" them. We can't reap
3017 their exits until all other threads in the group have exited. */
3018
3019static void
3020check_zombie_leaders (void)
3021{
3022 struct inferior *inf;
3023
3024 ALL_INFERIORS (inf)
3025 {
3026 struct lwp_info *leader_lp;
3027
3028 if (inf->pid == 0)
3029 continue;
3030
3031 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3032 if (leader_lp != NULL
3033 /* Check if there are other threads in the group, as we may
3034 have raced with the inferior simply exiting. */
3035 && num_lwps (inf->pid) > 1
5f572dec 3036 && linux_proc_pid_is_zombie (inf->pid))
0e5bf2a8
PA
3037 {
3038 if (debug_linux_nat)
3039 fprintf_unfiltered (gdb_stdlog,
3040 "CZL: Thread group leader %d zombie "
3041 "(it exited, or another thread execd).\n",
3042 inf->pid);
3043
3044 /* A leader zombie can mean one of two things:
3045
3046 - It exited, and there's an exit status pending
3047 available, or only the leader exited (not the whole
3048 program). In the latter case, we can't waitpid the
3049 leader's exit status until all other threads are gone.
3050
3051 - There are 3 or more threads in the group, and a thread
3052 other than the leader exec'd. On an exec, the Linux
3053 kernel destroys all other threads (except the execing
3054 one) in the thread group, and resets the execing thread's
3055 tid to the tgid. No exit notification is sent for the
3056 execing thread -- from the ptracer's perspective, it
3057 appears as though the execing thread just vanishes.
3058 Until we reap all other threads except the leader and the
3059 execing thread, the leader will be zombie, and the
3060 execing thread will be in `D (disc sleep)'. As soon as
3061 all other threads are reaped, the execing thread changes
3062 it's tid to the tgid, and the previous (zombie) leader
3063 vanishes, giving place to the "new" leader. We could try
3064 distinguishing the exit and exec cases, by waiting once
3065 more, and seeing if something comes out, but it doesn't
3066 sound useful. The previous leader _does_ go away, and
3067 we'll re-add the new one once we see the exec event
3068 (which is just the same as what would happen if the
3069 previous leader did exit voluntarily before some other
3070 thread execs). */
3071
3072 if (debug_linux_nat)
3073 fprintf_unfiltered (gdb_stdlog,
3074 "CZL: Thread group leader %d vanished.\n",
3075 inf->pid);
3076 exit_lwp (leader_lp);
3077 }
3078 }
3079}
3080
/* The bulk of the native target's to_wait implementation.  Wait for
   an event in LWPs matching PTID, store it in *OURSTATUS, and return
   the ptid of the LWP that reported it.  TARGET_OPTIONS may include
   TARGET_WNOHANG, in which case TARGET_WAITKIND_IGNORE is returned
   when nothing interesting is available yet.  */

static ptid_t
linux_nat_wait_1 (struct target_ops *ops,
		  ptid_t ptid, struct target_waitstatus *ourstatus,
		  int target_options)
{
  sigset_t prev_mask;
  enum resume_kind last_resume_kind;
  struct lwp_info *lp;
  int status;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");

  /* The first time we get here after starting a new inferior, we may
     not have added it to the LWP list yet - this is the earliest
     moment at which we know its PID.  */
  if (ptid_is_pid (inferior_ptid))
    {
      /* Upgrade the main thread's ptid.  */
      thread_change_ptid (inferior_ptid,
			  ptid_build (ptid_get_pid (inferior_ptid),
				      ptid_get_pid (inferior_ptid), 0));

      lp = add_initial_lwp (inferior_ptid);
      lp->resumed = 1;
    }

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  */
  block_child_signals (&prev_mask);

retry:
  status = 0;

  /* First check if there is a LWP with a wait status pending.  */
  lp = iterate_over_lwps (ptid, status_callback, NULL);
  if (lp != NULL)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Using pending wait status %s for %s.\n",
			    status_to_str (lp->status),
			    target_pid_to_str (lp->ptid));
    }

  if (!target_can_async_p ())
    {
      /* Causes SIGINT to be passed on to the attached process.  */
      set_sigint_trap ();
    }

  /* But if we don't find a pending event, we'll have to wait.  */

  while (lp == NULL)
    {
      pid_t lwpid;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */

      errno = 0;
      lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
      if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
	lwpid = my_waitpid (-1, &status, WNOHANG);

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNW: waitpid(-1, ...) returned %d, %s\n",
			    lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (lwpid > 0)
	{
	  /* If this is true, then we paused LWPs momentarily, and may
	     now have pending events to handle.  */
	  int new_pending;

	  if (debug_linux_nat)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "LLW: waitpid %ld received %s\n",
				  (long) lwpid, status_to_str (status));
	    }

	  lp = linux_nat_filter_event (lwpid, status, &new_pending);

	  /* STATUS is now no longer valid, use LP->STATUS instead.  */
	  status = 0;

	  if (lp && !ptid_match (lp->ptid, ptid))
	    {
	      /* The event is for an LWP we were not asked to wait
		 for; leave it pending on the LWP instead of
		 reporting it now.  */
	      gdb_assert (lp->resumed);

	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LWP %ld got an event %06x, "
				    "leaving pending.\n",
				    ptid_get_lwp (lp->ptid), lp->status);

	      if (WIFSTOPPED (lp->status))
		{
		  if (WSTOPSIG (lp->status) != SIGSTOP)
		    {
		      /* Cancel breakpoint hits.  The breakpoint may
			 be removed before we fetch events from this
			 process to report to the core.  It is best
			 not to assume the moribund breakpoints
			 heuristic always handles these cases --- it
			 could be too many events go through to the
			 core before this one is handled.  All-stop
			 always cancels breakpoint hits in all
			 threads.  */
		      if (non_stop
			  && linux_nat_lp_status_is_event (lp)
			  && cancel_breakpoint (lp))
			{
			  /* Throw away the SIGTRAP.  */
			  lp->status = 0;

			  if (debug_linux_nat)
			    fprintf_unfiltered (gdb_stdlog,
						"LLW: LWP %ld hit a "
						"breakpoint while "
						"waiting for another "
						"process; "
						"cancelled it\n",
						ptid_get_lwp (lp->ptid));
			}
		    }
		  else
		    lp->signalled = 0;
		}
	      else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
		{
		  if (debug_linux_nat)
		    fprintf_unfiltered (gdb_stdlog,
					"Process %ld exited while stopping "
					"LWPs\n",
					ptid_get_lwp (lp->ptid));

		  /* This was the last lwp in the process.  Since
		     events are serialized to GDB core, and we can't
		     report this one right now, but GDB core and the
		     other target layers will want to be notified
		     about the exit code/signal, leave the status
		     pending for the next time we're able to report
		     it.  */

		  /* Dead LWP's aren't expected to report a pending
		     sigstop.  */
		  lp->signalled = 0;

		  /* Store the pending event in the waitstatus as
		     well, because W_EXITCODE(0,0) == 0.  */
		  store_waitstatus (&lp->waitstatus, lp->status);
		}

	      /* Keep looking.  */
	      lp = NULL;
	    }

	  if (new_pending)
	    {
	      /* Some LWP now has a pending event.  Go all the way
		 back to check it.  */
	      goto retry;
	    }

	  if (lp)
	    {
	      /* We got an event to report to the core.  */
	      break;
	    }

	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      /* If there are no resumed children left, bail.  We'd be stuck
	 forever in the sigsuspend call below otherwise.  */
      if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");

	  ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;

	  if (!target_can_async_p ())
	    clear_sigint_trap ();

	  restore_child_signals_mask (&prev_mask);
	  return minus_one_ptid;
	}

      /* No interesting event to report to the core.  */

      if (target_options & TARGET_WNOHANG)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");

	  ourstatus->kind = TARGET_WAITKIND_IGNORE;
	  restore_child_signals_mask (&prev_mask);
	  return minus_one_ptid;
	}

      /* We shouldn't end up here unless we want to try again.  */
      gdb_assert (lp == NULL);

      /* Block until we get an event reported with SIGCHLD.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog, "LNW: about to sigsuspend\n");
      sigsuspend (&suspend_mask);
    }

  if (!target_can_async_p ())
    clear_sigint_trap ();

  gdb_assert (lp);

  /* Consume the pending status; LP->STATUS must be cleared so the
     event is not picked up again on the next wait.  */
  status = lp->status;
  lp->status = 0;

  /* Don't report signals that GDB isn't interested in, such as
     signals that are neither printed nor stopped upon.  Stopping all
     threads can be a bit time-consuming so if we want decent
     performance with heavily multi-threaded programs, especially when
     they're using a high frequency timer, we'd better avoid it if we
     can.  */

  if (WIFSTOPPED (status))
    {
      enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));

      /* When using hardware single-step, we need to report every signal.
	 Otherwise, signals in pass_mask may be short-circuited.  */
      if (!lp->step
	  && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
	{
	  /* FIXME: kettenis/2001-06-06: Should we resume all threads
	     here?  It is not clear we should.  GDB may not expect
	     other threads to run.  On the other hand, not resuming
	     newly attached threads may cause an unwanted delay in
	     getting them running.  */
	  linux_resume_one_lwp (lp, lp->step, signo);
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLW: %s %s, %s (preempt 'handle')\n",
				lp->step ?
				"PTRACE_SINGLESTEP" : "PTRACE_CONT",
				target_pid_to_str (lp->ptid),
				(signo != GDB_SIGNAL_0
				 ? strsignal (gdb_signal_to_host (signo))
				 : "0"));
	  goto retry;
	}

      if (!non_stop)
	{
	  /* Only do the below in all-stop, as we currently use SIGINT
	     to implement target_stop (see linux_nat_stop) in
	     non-stop.  */
	  if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
	    {
	      /* If ^C/BREAK is typed at the tty/console, SIGINT gets
		 forwarded to the entire process group, that is, all LWPs
		 will receive it - unless they're using CLONE_THREAD to
		 share signals.  Since we only want to report it once, we
		 mark it as ignored for all LWPs except this one.  */
	      iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
				 set_ignore_sigint, NULL);
	      lp->ignore_sigint = 0;
	    }
	  else
	    maybe_clear_ignore_sigint (lp);
	}
    }

  /* This LWP is stopped now.  */
  lp->stopped = 1;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
			status_to_str (status), target_pid_to_str (lp->ptid));

  if (!non_stop)
    {
      /* Now stop all other LWP's ...  */
      iterate_over_lwps (minus_one_ptid, stop_callback, NULL);

      /* ... and wait until all of them have reported back that
	 they're no longer running.  */
      iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
	select_event_lwp (ptid, &lp, &status);

      /* Now that we've selected our final event LWP, cancel any
	 breakpoints in other LWPs that have hit a GDB breakpoint.
	 See the comment in cancel_breakpoints_callback to find out
	 why.  */
      iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);

      /* We'll need this to determine whether to report a SIGSTOP as
	 TARGET_WAITKIND_0.  Need to take a copy because
	 resume_clear_callback clears it.  */
      last_resume_kind = lp->last_resume_kind;

      /* In all-stop, from the core's perspective, all LWPs are now
	 stopped until a new resume action is sent over.  */
      iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
    }
  else
    {
      /* See above.  */
      last_resume_kind = lp->last_resume_kind;
      resume_clear_callback (lp, NULL);
    }

  if (linux_nat_status_is_event (status))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: trap ptid is %s.\n",
			    target_pid_to_str (lp->ptid));
    }

  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      /* An extended waitstatus (fork/vfork/exec/syscall) recorded
	 earlier takes precedence over the raw wait status.  */
      *ourstatus = lp->waitstatus;
      lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
    }
  else
    store_waitstatus (ourstatus, status);

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");

  restore_child_signals_mask (&prev_mask);

  if (last_resume_kind == resume_stop
      && ourstatus->kind == TARGET_WAITKIND_STOPPED
      && WSTOPSIG (status) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with
	 target_stop, and it stopped cleanly, so report as SIG0.  The
	 use of SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = GDB_SIGNAL_0;
    }

  if (ourstatus->kind == TARGET_WAITKIND_EXITED
      || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
    lp->core = -1;
  else
    lp->core = linux_common_core_of_thread (lp->ptid);

  return lp->ptid;
}
3456
e3e9f5a2
PA
3457/* Resume LWPs that are currently stopped without any pending status
3458 to report, but are resumed from the core's perspective. */
3459
3460static int
3461resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3462{
3463 ptid_t *wait_ptid_p = data;
3464
3465 if (lp->stopped
3466 && lp->resumed
8a99810d 3467 && !lwp_status_pending_p (lp))
e3e9f5a2 3468 {
336060f3
PA
3469 struct regcache *regcache = get_thread_regcache (lp->ptid);
3470 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3471 CORE_ADDR pc = regcache_read_pc (regcache);
3472
e3e9f5a2
PA
3473 gdb_assert (is_executing (lp->ptid));
3474
3475 /* Don't bother if there's a breakpoint at PC that we'd hit
3476 immediately, and we're not waiting for this LWP. */
3477 if (!ptid_match (lp->ptid, *wait_ptid_p))
3478 {
e3e9f5a2
PA
3479 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3480 return 0;
3481 }
3482
3483 if (debug_linux_nat)
3484 fprintf_unfiltered (gdb_stdlog,
336060f3
PA
3485 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
3486 target_pid_to_str (lp->ptid),
3487 paddress (gdbarch, pc),
3488 lp->step);
e3e9f5a2 3489
8a99810d 3490 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
e3e9f5a2
PA
3491 }
3492
3493 return 0;
3494}
3495
7feb7d06
PA
3496static ptid_t
3497linux_nat_wait (struct target_ops *ops,
47608cb1
PA
3498 ptid_t ptid, struct target_waitstatus *ourstatus,
3499 int target_options)
7feb7d06
PA
3500{
3501 ptid_t event_ptid;
3502
3503 if (debug_linux_nat)
09826ec5
PA
3504 {
3505 char *options_string;
3506
3507 options_string = target_options_to_string (target_options);
3508 fprintf_unfiltered (gdb_stdlog,
3509 "linux_nat_wait: [%s], [%s]\n",
3510 target_pid_to_str (ptid),
3511 options_string);
3512 xfree (options_string);
3513 }
7feb7d06
PA
3514
3515 /* Flush the async file first. */
3516 if (target_can_async_p ())
3517 async_file_flush ();
3518
e3e9f5a2
PA
3519 /* Resume LWPs that are currently stopped without any pending status
3520 to report, but are resumed from the core's perspective. LWPs get
3521 in this state if we find them stopping at a time we're not
3522 interested in reporting the event (target_wait on a
3523 specific_process, for example, see linux_nat_wait_1), and
3524 meanwhile the event became uninteresting. Don't bother resuming
3525 LWPs we're not going to wait for if they'd stop immediately. */
3526 if (non_stop)
3527 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3528
47608cb1 3529 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
7feb7d06
PA
3530
3531 /* If we requested any event, and something came out, assume there
3532 may be more. If we requested a specific lwp or process, also
3533 assume there may be more. */
3534 if (target_can_async_p ()
6953d224
PA
3535 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3536 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
7feb7d06
PA
3537 || !ptid_equal (ptid, minus_one_ptid)))
3538 async_file_mark ();
3539
3540 /* Get ready for the next event. */
3541 if (target_can_async_p ())
3542 target_async (inferior_event_handler, 0);
3543
3544 return event_ptid;
3545}
3546
d6b0e80f
AC
3547static int
3548kill_callback (struct lwp_info *lp, void *data)
3549{
ed731959
JK
3550 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3551
3552 errno = 0;
69ff6be5 3553 kill_lwp (ptid_get_lwp (lp->ptid), SIGKILL);
ed731959 3554 if (debug_linux_nat)
57745c90
PA
3555 {
3556 int save_errno = errno;
3557
3558 fprintf_unfiltered (gdb_stdlog,
3559 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
3560 target_pid_to_str (lp->ptid),
3561 save_errno ? safe_strerror (save_errno) : "OK");
3562 }
ed731959
JK
3563
3564 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3565
d6b0e80f 3566 errno = 0;
dfd4cc63 3567 ptrace (PTRACE_KILL, ptid_get_lwp (lp->ptid), 0, 0);
d6b0e80f 3568 if (debug_linux_nat)
57745c90
PA
3569 {
3570 int save_errno = errno;
3571
3572 fprintf_unfiltered (gdb_stdlog,
3573 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3574 target_pid_to_str (lp->ptid),
3575 save_errno ? safe_strerror (save_errno) : "OK");
3576 }
d6b0e80f
AC
3577
3578 return 0;
3579}
3580
3581static int
3582kill_wait_callback (struct lwp_info *lp, void *data)
3583{
3584 pid_t pid;
3585
3586 /* We must make sure that there are no pending events (delayed
3587 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3588 program doesn't interfere with any following debugging session. */
3589
3590 /* For cloned processes we must check both with __WCLONE and
3591 without, since the exit status of a cloned process isn't reported
3592 with __WCLONE. */
3593 if (lp->cloned)
3594 {
3595 do
3596 {
dfd4cc63 3597 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, __WCLONE);
e85a822c 3598 if (pid != (pid_t) -1)
d6b0e80f 3599 {
e85a822c
DJ
3600 if (debug_linux_nat)
3601 fprintf_unfiltered (gdb_stdlog,
3602 "KWC: wait %s received unknown.\n",
3603 target_pid_to_str (lp->ptid));
3604 /* The Linux kernel sometimes fails to kill a thread
3605 completely after PTRACE_KILL; that goes from the stop
3606 point in do_fork out to the one in
3607 get_signal_to_deliever and waits again. So kill it
3608 again. */
3609 kill_callback (lp, NULL);
d6b0e80f
AC
3610 }
3611 }
dfd4cc63 3612 while (pid == ptid_get_lwp (lp->ptid));
d6b0e80f
AC
3613
3614 gdb_assert (pid == -1 && errno == ECHILD);
3615 }
3616
3617 do
3618 {
dfd4cc63 3619 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, 0);
e85a822c 3620 if (pid != (pid_t) -1)
d6b0e80f 3621 {
e85a822c
DJ
3622 if (debug_linux_nat)
3623 fprintf_unfiltered (gdb_stdlog,
3624 "KWC: wait %s received unk.\n",
3625 target_pid_to_str (lp->ptid));
3626 /* See the call to kill_callback above. */
3627 kill_callback (lp, NULL);
d6b0e80f
AC
3628 }
3629 }
dfd4cc63 3630 while (pid == ptid_get_lwp (lp->ptid));
d6b0e80f
AC
3631
3632 gdb_assert (pid == -1 && errno == ECHILD);
3633 return 0;
3634}
3635
3636static void
7d85a9c0 3637linux_nat_kill (struct target_ops *ops)
d6b0e80f 3638{
f973ed9c
DJ
3639 struct target_waitstatus last;
3640 ptid_t last_ptid;
3641 int status;
d6b0e80f 3642
f973ed9c
DJ
3643 /* If we're stopped while forking and we haven't followed yet,
3644 kill the other task. We need to do this first because the
3645 parent will be sleeping if this is a vfork. */
d6b0e80f 3646
f973ed9c 3647 get_last_target_status (&last_ptid, &last);
d6b0e80f 3648
f973ed9c
DJ
3649 if (last.kind == TARGET_WAITKIND_FORKED
3650 || last.kind == TARGET_WAITKIND_VFORKED)
3651 {
dfd4cc63 3652 ptrace (PT_KILL, ptid_get_pid (last.value.related_pid), 0, 0);
f973ed9c 3653 wait (&status);
26cb8b7c
PA
3654
3655 /* Let the arch-specific native code know this process is
3656 gone. */
dfd4cc63 3657 linux_nat_forget_process (ptid_get_pid (last.value.related_pid));
f973ed9c
DJ
3658 }
3659
3660 if (forks_exist_p ())
7feb7d06 3661 linux_fork_killall ();
f973ed9c
DJ
3662 else
3663 {
d90e17a7 3664 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
e0881a8e 3665
4c28f408
PA
3666 /* Stop all threads before killing them, since ptrace requires
3667 that the thread is stopped to sucessfully PTRACE_KILL. */
d90e17a7 3668 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
3669 /* ... and wait until all of them have reported back that
3670 they're no longer running. */
d90e17a7 3671 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 3672
f973ed9c 3673 /* Kill all LWP's ... */
d90e17a7 3674 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
3675
3676 /* ... and wait until we've flushed all events. */
d90e17a7 3677 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
3678 }
3679
3680 target_mourn_inferior ();
d6b0e80f
AC
3681}
3682
3683static void
136d6dae 3684linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 3685{
26cb8b7c
PA
3686 int pid = ptid_get_pid (inferior_ptid);
3687
3688 purge_lwp_list (pid);
d6b0e80f 3689
f973ed9c 3690 if (! forks_exist_p ())
d90e17a7
PA
3691 /* Normal case, no other forks available. */
3692 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
3693 else
3694 /* Multi-fork case. The current inferior_ptid has exited, but
3695 there are other viable forks to debug. Delete the exiting
3696 one and context-switch to the first available. */
3697 linux_fork_mourn_inferior ();
26cb8b7c
PA
3698
3699 /* Let the arch-specific native code know this process is gone. */
3700 linux_nat_forget_process (pid);
d6b0e80f
AC
3701}
3702
5b009018
PA
3703/* Convert a native/host siginfo object, into/from the siginfo in the
3704 layout of the inferiors' architecture. */
3705
3706static void
a5362b9a 3707siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5b009018
PA
3708{
3709 int done = 0;
3710
3711 if (linux_nat_siginfo_fixup != NULL)
3712 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3713
3714 /* If there was no callback, or the callback didn't do anything,
3715 then just do a straight memcpy. */
3716 if (!done)
3717 {
3718 if (direction == 1)
a5362b9a 3719 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5b009018 3720 else
a5362b9a 3721 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5b009018
PA
3722 }
3723}
3724
/* Handle TARGET_OBJECT_SIGNAL_INFO reads/writes via
   PTRACE_GETSIGINFO/PTRACE_SETSIGINFO.  Exactly one of READBUF and
   WRITEBUF is non-NULL.  Returns a target_xfer_status and sets
   *XFERED_LEN on success.  */

static enum target_xfer_status
linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
		    const char *annex, gdb_byte *readbuf,
		    const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
		    ULONGEST *xfered_len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
  gdb_assert (readbuf || writebuf);

  /* Prefer the LWP id; fall back to the process id when the ptid
     carries no LWP component.  */
  pid = ptid_get_lwp (inferior_ptid);
  if (pid == 0)
    pid = ptid_get_pid (inferior_ptid);

  if (offset > sizeof (siginfo))
    return TARGET_XFER_E_IO;

  /* ptrace returns -1 on some success paths too, so errno is the
     reliable error indicator here.  */
  errno = 0;
  ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
  if (errno != 0)
    return TARGET_XFER_E_IO;

  /* When GDB is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDB should look the same as debugging it
     with a 32-bit GDB, we need to convert it.  GDB core always sees
     the converted layout, so any read/write will have to be done
     post-conversion.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the request to the data actually available.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      errno = 0;
      ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
      if (errno != 0)
	return TARGET_XFER_E_IO;
    }

  *xfered_len = len;
  return TARGET_XFER_OK;
}
3779
/* Implementation of the "to_xfer_partial" target_ops method.
   Dispatches siginfo requests, short-circuits memory requests when
   no live inferior is selected, and otherwise forwards to the
   single-threaded layer with inferior_ptid narrowed to the LWP.  */

static enum target_xfer_status
linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
			const char *annex, gdb_byte *readbuf,
			const gdb_byte *writebuf,
			ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
{
  struct cleanup *old_chain;
  enum target_xfer_status xfer;

  if (object == TARGET_OBJECT_SIGNAL_INFO)
    return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
			       offset, len, xfered_len);

  /* The target is connected but no live inferior is selected.  Pass
     this request down to a lower stratum (e.g., the executable
     file).  */
  if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
    return TARGET_XFER_EOF;

  old_chain = save_inferior_ptid ();

  /* The layer below expects a plain (lwpid, 0, 0) ptid.  */
  if (ptid_lwp_p (inferior_ptid))
    inferior_ptid = pid_to_ptid (ptid_get_lwp (inferior_ptid));

  xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
				     offset, len, xfered_len);

  do_cleanups (old_chain);
  return xfer;
}
3810
3811static int
28439f5e 3812linux_thread_alive (ptid_t ptid)
d6b0e80f 3813{
8c6a60d1 3814 int err, tmp_errno;
4c28f408 3815
dfd4cc63 3816 gdb_assert (ptid_lwp_p (ptid));
d6b0e80f 3817
4c28f408
PA
3818 /* Send signal 0 instead of anything ptrace, because ptracing a
3819 running thread errors out claiming that the thread doesn't
3820 exist. */
dfd4cc63 3821 err = kill_lwp (ptid_get_lwp (ptid), 0);
8c6a60d1 3822 tmp_errno = errno;
d6b0e80f
AC
3823 if (debug_linux_nat)
3824 fprintf_unfiltered (gdb_stdlog,
4c28f408 3825 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 3826 target_pid_to_str (ptid),
8c6a60d1 3827 err ? safe_strerror (tmp_errno) : "OK");
9c0dd46b 3828
4c28f408 3829 if (err != 0)
d6b0e80f
AC
3830 return 0;
3831
3832 return 1;
3833}
3834
/* Implementation of the "to_thread_alive" target_ops method; thin
   wrapper over linux_thread_alive.  */

static int
linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
{
  return linux_thread_alive (ptid);
}
3840
d6b0e80f 3841static char *
117de6a9 3842linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
3843{
3844 static char buf[64];
3845
dfd4cc63
LM
3846 if (ptid_lwp_p (ptid)
3847 && (ptid_get_pid (ptid) != ptid_get_lwp (ptid)
3848 || num_lwps (ptid_get_pid (ptid)) > 1))
d6b0e80f 3849 {
dfd4cc63 3850 snprintf (buf, sizeof (buf), "LWP %ld", ptid_get_lwp (ptid));
d6b0e80f
AC
3851 return buf;
3852 }
3853
3854 return normal_pid_to_str (ptid);
3855}
3856
/* Implementation of the "to_thread_name" target_ops method.  Read
   the thread name from /proc/<pid>/task/<lwp>/comm.  Returns NULL
   when unavailable, otherwise a pointer into a static buffer that is
   valid until the next call.  */

static char *
linux_nat_thread_name (struct target_ops *self, struct thread_info *thr)
{
  int pid = ptid_get_pid (thr->ptid);
  long lwp = ptid_get_lwp (thr->ptid);
#define FORMAT "/proc/%d/task/%ld/comm"
  char buf[sizeof (FORMAT) + 30];
  FILE *comm_file;
  char *result = NULL;

  snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
  comm_file = gdb_fopen_cloexec (buf, "r");
  if (comm_file)
    {
      /* Not exported by the kernel, so we define it here.  */
#define COMM_LEN 16
      static char line[COMM_LEN + 1];

      if (fgets (line, sizeof (line), comm_file))
	{
	  /* Strip the trailing newline fgets leaves in place.  */
	  char *nl = strchr (line, '\n');

	  if (nl)
	    *nl = '\0';
	  if (*line != '\0')
	    result = line;
	}

      fclose (comm_file);
    }

#undef COMM_LEN
#undef FORMAT

  return result;
}
3893
/* Accepts an integer PID; Returns a string representing a file that
   can be opened to get the symbols for the child process.  The
   result points at a static buffer valid until the next call.  */

static char *
linux_child_pid_to_exec_file (struct target_ops *self, int pid)
{
  static char buf[PATH_MAX];
  char name[PATH_MAX];

  xsnprintf (name, PATH_MAX, "/proc/%d/exe", pid);
  /* Zero-fill so the readlink result below is always
     NUL-terminated.  */
  memset (buf, 0, PATH_MAX);
  /* If the symlink can't be resolved, fall back to returning the
     /proc path itself.  */
  if (readlink (name, buf, PATH_MAX - 1) <= 0)
    strcpy (buf, name);

  return buf;
}
3910
10d6c8cd
DJ
3911/* Implement the to_xfer_partial interface for memory reads using the /proc
3912 filesystem. Because we can use a single read() call for /proc, this
3913 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3914 but it doesn't support writes. */
3915
9b409511 3916static enum target_xfer_status
10d6c8cd
DJ
3917linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3918 const char *annex, gdb_byte *readbuf,
3919 const gdb_byte *writebuf,
9b409511 3920 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
dba24537 3921{
10d6c8cd
DJ
3922 LONGEST ret;
3923 int fd;
dba24537
AC
3924 char filename[64];
3925
10d6c8cd 3926 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
3927 return 0;
3928
3929 /* Don't bother for one word. */
3930 if (len < 3 * sizeof (long))
9b409511 3931 return TARGET_XFER_EOF;
dba24537
AC
3932
3933 /* We could keep this file open and cache it - possibly one per
3934 thread. That requires some juggling, but is even faster. */
cde33bf1
YQ
3935 xsnprintf (filename, sizeof filename, "/proc/%d/mem",
3936 ptid_get_pid (inferior_ptid));
614c279d 3937 fd = gdb_open_cloexec (filename, O_RDONLY | O_LARGEFILE, 0);
dba24537 3938 if (fd == -1)
9b409511 3939 return TARGET_XFER_EOF;
dba24537
AC
3940
3941 /* If pread64 is available, use it. It's faster if the kernel
3942 supports it (only one syscall), and it's 64-bit safe even on
3943 32-bit platforms (for instance, SPARC debugging a SPARC64
3944 application). */
3945#ifdef HAVE_PREAD64
10d6c8cd 3946 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 3947#else
10d6c8cd 3948 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
3949#endif
3950 ret = 0;
3951 else
3952 ret = len;
3953
3954 close (fd);
9b409511
YQ
3955
3956 if (ret == 0)
3957 return TARGET_XFER_EOF;
3958 else
3959 {
3960 *xfered_len = ret;
3961 return TARGET_XFER_OK;
3962 }
dba24537
AC
3963}
3964
/* Enumerate spufs IDs for process PID.  Scan /proc/<pid>/fd for open
   file descriptors that are directories on a spufs mount, and store
   each matching fd as a 4-byte target-order integer into BUF,
   honoring the OFFSET/LEN window.  Returns the number of bytes
   written into BUF, or -1 if /proc is unavailable.  */
static LONGEST
spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, ULONGEST len)
{
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
  LONGEST pos = 0;
  LONGEST written = 0;
  char path[128];
  DIR *dir;
  struct dirent *entry;

  xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
  dir = opendir (path);
  if (!dir)
    return -1;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      /* Skip non-numeric entries (atoi yields 0; fd 0 itself can't
	 be an SPU context).  */
      fd = atoi (entry->d_name);
      if (!fd)
	continue;

      xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
	continue;
      if (!S_ISDIR (st.st_mode))
	continue;

      /* An SPU context shows up as a directory on a spufs mount.  */
      if (statfs (path, &stfs) != 0)
	continue;
      if (stfs.f_type != SPUFS_MAGIC)
	continue;

      /* Only emit ids that fall inside the requested window; POS
	 tracks the virtual stream position regardless.  */
      if (pos >= offset && pos + 4 <= offset + len)
	{
	  store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
	  written += 4;
	}
      pos += 4;
    }

  closedir (dir);
  return written;
}
4015
/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
   object type, using the /proc file system.  A NULL ANNEX requests
   the list of SPU context ids (read-only); otherwise ANNEX names a
   file under /proc/<pid>/fd/ to read or write.  */

static enum target_xfer_status
linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte *readbuf,
		     const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
{
  char buf[128];
  int fd = 0;
  int ret = -1;
  int pid = ptid_get_pid (inferior_ptid);

  if (!annex)
    {
      /* No annex: enumerate the SPU context ids.  Writing the list
	 makes no sense.  */
      if (!readbuf)
	return TARGET_XFER_E_IO;
      else
	{
	  LONGEST l = spu_enumerate_spu_ids (pid, readbuf, offset, len);

	  if (l < 0)
	    return TARGET_XFER_E_IO;
	  else if (l == 0)
	    return TARGET_XFER_EOF;
	  else
	    {
	      *xfered_len = (ULONGEST) l;
	      return TARGET_XFER_OK;
	    }
	}
    }

  xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
  fd = gdb_open_cloexec (buf, writebuf? O_WRONLY : O_RDONLY, 0);
  if (fd <= 0)
    return TARGET_XFER_E_IO;

  /* Position at OFFSET; a seek failure means the request is past the
     end of the file.  */
  if (offset != 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    {
      close (fd);
      return TARGET_XFER_EOF;
    }

  if (writebuf)
    ret = write (fd, writebuf, (size_t) len);
  else if (readbuf)
    ret = read (fd, readbuf, (size_t) len);

  close (fd);

  if (ret < 0)
    return TARGET_XFER_E_IO;
  else if (ret == 0)
    return TARGET_XFER_EOF;
  else
    {
      *xfered_len = (ULONGEST) ret;
      return TARGET_XFER_OK;
    }
}
4079
4080
dba24537
AC
4081/* Parse LINE as a signal set and add its set bits to SIGS. */
4082
4083static void
4084add_line_to_sigset (const char *line, sigset_t *sigs)
4085{
4086 int len = strlen (line) - 1;
4087 const char *p;
4088 int signum;
4089
4090 if (line[len] != '\n')
8a3fe4f8 4091 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4092
4093 p = line;
4094 signum = len * 4;
4095 while (len-- > 0)
4096 {
4097 int digit;
4098
4099 if (*p >= '0' && *p <= '9')
4100 digit = *p - '0';
4101 else if (*p >= 'a' && *p <= 'f')
4102 digit = *p - 'a' + 10;
4103 else
8a3fe4f8 4104 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4105
4106 signum -= 4;
4107
4108 if (digit & 1)
4109 sigaddset (sigs, signum + 1);
4110 if (digit & 2)
4111 sigaddset (sigs, signum + 2);
4112 if (digit & 4)
4113 sigaddset (sigs, signum + 3);
4114 if (digit & 8)
4115 sigaddset (sigs, signum + 4);
4116
4117 p++;
4118 }
4119}
4120
/* Find process PID's pending signals from /proc/pid/status and set
   SIGS to match.  PENDING, BLOCKED and IGNORED are cleared first and
   then filled from the SigPnd/ShdPnd, SigBlk and SigIgn lines
   respectively.  Raises an error if the status file can't be
   opened.  */

void
linux_proc_pending_signals (int pid, sigset_t *pending,
			    sigset_t *blocked, sigset_t *ignored)
{
  FILE *procfile;
  char buffer[PATH_MAX], fname[PATH_MAX];
  struct cleanup *cleanup;

  sigemptyset (pending);
  sigemptyset (blocked);
  sigemptyset (ignored);
  xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
  procfile = gdb_fopen_cloexec (fname, "r");
  if (procfile == NULL)
    error (_("Could not open %s"), fname);
  cleanup = make_cleanup_fclose (procfile);

  while (fgets (buffer, PATH_MAX, procfile) != NULL)
    {
      /* Normal queued signals are on the SigPnd line in the status
	 file.  However, 2.6 kernels also have a "shared" pending
	 queue for delivering signals to a thread group, so check for
	 a ShdPnd line also.

	 Unfortunately some Red Hat kernels include the shared pending
	 queue but not the ShdPnd status field.  */

      if (strncmp (buffer, "SigPnd:\t", 8) == 0)
	add_line_to_sigset (buffer + 8, pending);
      else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
	add_line_to_sigset (buffer + 8, pending);
      else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
	add_line_to_sigset (buffer + 8, blocked);
      else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
	add_line_to_sigset (buffer + 8, ignored);
    }

  do_cleanups (cleanup);
}
4163
9b409511 4164static enum target_xfer_status
07e059b5 4165linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
e0881a8e 4166 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4167 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4168 ULONGEST *xfered_len)
07e059b5 4169{
07e059b5
VP
4170 gdb_assert (object == TARGET_OBJECT_OSDATA);
4171
9b409511
YQ
4172 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4173 if (*xfered_len == 0)
4174 return TARGET_XFER_EOF;
4175 else
4176 return TARGET_XFER_OK;
07e059b5
VP
4177}
4178
/* Dispatching to_xfer_partial for the generic GNU/Linux target:
   routes auxv, osdata and SPU objects to their handlers, masks
   memory addresses to the architecture's address width, tries the
   fast /proc path for memory, and finally falls back to the
   inherited (ptrace-based) implementation.  */

static enum target_xfer_status
linux_xfer_partial (struct target_ops *ops, enum target_object object,
		    const char *annex, gdb_byte *readbuf,
		    const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
		    ULONGEST *xfered_len)
{
  enum target_xfer_status xfer;

  if (object == TARGET_OBJECT_AUXV)
    return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
			     offset, len, xfered_len);

  if (object == TARGET_OBJECT_OSDATA)
    return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
				  offset, len, xfered_len);

  if (object == TARGET_OBJECT_SPU)
    return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
				offset, len, xfered_len);

  /* GDB calculates all the addresses in a possibly larger width of
     the address.  Address width needs to be masked before its final
     use - either by linux_proc_xfer_partial or
     inf_ptrace_xfer_partial.

     Compare ADDR_BIT first to avoid a compiler warning on shift overflow.  */

  if (object == TARGET_OBJECT_MEMORY)
    {
      int addr_bit = gdbarch_addr_bit (target_gdbarch ());

      if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
	offset &= ((ULONGEST) 1 << addr_bit) - 1;
    }

  xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
				  offset, len, xfered_len);
  if (xfer != TARGET_XFER_EOF)
    return xfer;

  return super_xfer_partial (ops, object, annex, readbuf, writebuf,
			     offset, len, xfered_len);
}
4221
/* Cleanup callback: resume ("unpause") all threads of the process
   *ARG (a ptid_t) that were stopped for a static tracepoint marker
   query.  */

static void
cleanup_target_stop (void *arg)
{
  ptid_t *ptid = (ptid_t *) arg;

  gdb_assert (arg != NULL);

  /* Unpause all */
  target_resume (*ptid, 0, GDB_SIGNAL_0);
}
4232
/* Implementation of the "to_static_tracepoint_markers_by_strid"
   target_ops method.  Pause the whole process, query the in-process
   agent with qTfSTM/qTsSTM for its static tracepoint markers, and
   return the ones whose string id matches STRID (all markers if
   STRID is NULL).  The process is resumed by the cleanup.  */

static VEC(static_tracepoint_marker_p) *
linux_child_static_tracepoint_markers_by_strid (struct target_ops *self,
						const char *strid)
{
  char s[IPA_CMD_BUF_SIZE];
  struct cleanup *old_chain;
  int pid = ptid_get_pid (inferior_ptid);
  VEC(static_tracepoint_marker_p) *markers = NULL;
  struct static_tracepoint_marker *marker = NULL;
  char *p = s;
  ptid_t ptid = ptid_build (pid, 0, 0);

  /* Pause all */
  target_stop (ptid);

  memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
  s[sizeof ("qTfSTM")] = 0;

  agent_run_command (pid, s, strlen (s) + 1);

  old_chain = make_cleanup (free_current_marker, &marker);
  make_cleanup (cleanup_target_stop, &ptid);

  /* Each reply starting with 'm' carries one or more marker
     definitions; keep issuing qTsSTM until the agent has no more.  */
  while (*p++ == 'm')
    {
      if (marker == NULL)
	marker = XCNEW (struct static_tracepoint_marker);

      do
	{
	  parse_static_tracepoint_marker_definition (p, &p, marker);

	  if (strid == NULL || strcmp (strid, marker->str_id) == 0)
	    {
	      /* Keep this one; a fresh marker object will be
		 allocated on the next iteration.  */
	      VEC_safe_push (static_tracepoint_marker_p,
			     markers, marker);
	      marker = NULL;
	    }
	  else
	    {
	      /* Not a match; recycle the marker object.  */
	      release_static_tracepoint_marker (marker);
	      memset (marker, 0, sizeof (*marker));
	    }
	}
      while (*p++ == ',');	/* comma-separated list */

      memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
      s[sizeof ("qTsSTM")] = 0;
      agent_run_command (pid, s, strlen (s) + 1);
      p = s;
    }

  do_cleanups (old_chain);

  return markers;
}
4289
/* Create a prototype generic GNU/Linux target.  The client can override
   it with local methods.  */

static void
linux_target_install_ops (struct target_ops *t)
{
  /* Catchpoint and /proc-based helpers provided by this file.  */
  t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
  t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
  t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
  t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
  t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
  t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
  t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
  t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
  t->to_post_startup_inferior = linux_child_post_startup_inferior;
  t->to_post_attach = linux_child_post_attach;
  t->to_follow_fork = linux_child_follow_fork;

  /* Remember the inherited xfer method so linux_xfer_partial can
     fall back to it.  */
  super_xfer_partial = t->to_xfer_partial;
  t->to_xfer_partial = linux_xfer_partial;

  t->to_static_tracepoint_markers_by_strid
    = linux_child_static_tracepoint_markers_by_strid;
}
4314
/* Build the generic GNU/Linux target on top of inf-ptrace and return
   it; callers may further override its methods.  */

struct target_ops *
linux_target (void)
{
  struct target_ops *t = inf_ptrace_target ();

  linux_target_install_ops (t);
  return t;
}
4325
4326struct target_ops *
7714d83a 4327linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
4328{
4329 struct target_ops *t;
4330
4331 t = inf_ptrace_trad_target (register_u_offset);
4332 linux_target_install_ops (t);
10d6c8cd 4333
10d6c8cd
DJ
4334 return t;
4335}
4336
/* target_is_async_p implementation.  Returns non-zero when async
   mode is enabled.  */

static int
linux_nat_is_async_p (struct target_ops *ops)
{
  /* NOTE: palves 2008-03-21: We're only async when the user requests
     it explicitly with the "set target-async" command.
     Someday, linux will always be async.  */
  return target_async_permitted;
}
4347
/* target_can_async_p implementation.  Mirrors
   linux_nat_is_async_p.  */

static int
linux_nat_can_async_p (struct target_ops *ops)
{
  /* NOTE: palves 2008-03-21: We're only async when the user requests
     it explicitly with the "set target-async" command.
     Someday, linux will always be async.  */
  return target_async_permitted;
}
4358
/* Implementation of the "to_supports_non_stop" target_ops method;
   the native GNU/Linux target always supports non-stop mode.  */

static int
linux_nat_supports_non_stop (struct target_ops *self)
{
  return 1;
}
4364
/* True if we want to support multi-process.  To be removed when GDB
   supports multi-exec.  */

int linux_multi_process = 1;

/* Implementation of the "to_supports_multi_process" target_ops
   method.  */

static int
linux_nat_supports_multi_process (struct target_ops *self)
{
  return linux_multi_process;
}
4375
/* Implementation of the "to_supports_disable_randomization"
   target_ops method: true iff personality(2) support was detected at
   configure time.  */

static int
linux_nat_supports_disable_randomization (struct target_ops *self)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
4385
/* True while GDB (not the inferior) owns the terminal in async mode;
   used to make the terminal_inferior/terminal_ours transitions
   idempotent.  */
static int async_terminal_is_ours = 1;

/* target_terminal_inferior implementation.

   This is a wrapper around child_terminal_inferior to add async support.  */

static void
linux_nat_terminal_inferior (struct target_ops *self)
{
  if (!target_is_async_p ())
    {
      /* Async mode is disabled.  */
      child_terminal_inferior (self);
      return;
    }

  child_terminal_inferior (self);

  /* Calls to target_terminal_*() are meant to be idempotent.  */
  if (!async_terminal_is_ours)
    return;

  /* Stop listening on stdin while the inferior owns the terminal,
     and trap SIGINT so it interrupts the inferior, not GDB.  */
  delete_file_handler (input_fd);
  async_terminal_is_ours = 0;
  set_sigint_trap ();
}
4412
/* target_terminal_ours implementation.

   This is a wrapper around child_terminal_ours to add async support (and
   implement the target_terminal_ours vs target_terminal_ours_for_output
   distinction).  child_terminal_ours is currently no different than
   child_terminal_ours_for_output.
   We leave target_terminal_ours_for_output alone, leaving it to
   child_terminal_ours_for_output.  */

static void
linux_nat_terminal_ours (struct target_ops *self)
{
  if (!target_is_async_p ())
    {
      /* Async mode is disabled.  */
      child_terminal_ours (self);
      return;
    }

  /* GDB should never give the terminal to the inferior if the
     inferior is running in the background (run&, continue&, etc.),
     but claiming it sure should.  */
  child_terminal_ours (self);

  /* Already ours; nothing more to do (idempotent).  */
  if (async_terminal_is_ours)
    return;

  /* Restore normal SIGINT handling and resume listening on stdin.  */
  clear_sigint_trap ();
  add_file_handler (input_fd, stdin_event_handler, 0);
  async_terminal_is_ours = 1;
}
4444
/* Event-loop callback and context registered by linux_nat_async.  */
static void (*async_client_callback) (enum inferior_event_type event_type,
				      void *context);
static void *async_client_context;

/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when any child changes state, and notify the
   event-loop; it allows us to use sigsuspend in linux_nat_wait_1
   above to wait for the arrival of a SIGCHLD.  */

static void
sigchld_handler (int signo)
{
  /* Preserve errno: everything here must stay async-signal-safe.  */
  int old_errno = errno;

  if (debug_linux_nat)
    ui_file_write_async_safe (gdb_stdlog,
			      "sigchld\n", sizeof ("sigchld\n") - 1);

  if (signo == SIGCHLD
      && linux_nat_event_pipe[0] != -1)
    async_file_mark (); /* Let the event loop know that there are
			   events to handle.  */

  errno = old_errno;
}
4470
/* Callback registered with the target events file descriptor.  */

static void
handle_target_event (int error, gdb_client_data client_data)
{
  /* Forward to the client callback registered via linux_nat_async.  */
  (*async_client_callback) (INF_REG_EVENT, async_client_context);
}
4478
/* Create/destroy the target events pipe.  Returns previous state.  */

static int
linux_async_pipe (int enable)
{
  int previous = (linux_nat_event_pipe[0] != -1);

  if (previous != enable)
    {
      sigset_t prev_mask;

      /* Block child signals while we create/destroy the pipe, as
	 their handler writes to it.  */
      block_child_signals (&prev_mask);

      if (enable)
	{
	  if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
	    internal_error (__FILE__, __LINE__,
			    "creating event pipe failed.");

	  /* Both ends non-blocking: the write end is used from the
	     SIGCHLD handler, the read end from the event loop.  */
	  fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
	}
      else
	{
	  close (linux_nat_event_pipe[0]);
	  close (linux_nat_event_pipe[1]);
	  linux_nat_event_pipe[0] = -1;
	  linux_nat_event_pipe[1] = -1;
	}

      restore_child_signals_mask (&prev_mask);
    }

  return previous;
}
4516
4517/* target_async implementation. */
4518
4519static void
6a109b6b
TT
4520linux_nat_async (struct target_ops *ops,
4521 void (*callback) (enum inferior_event_type event_type,
4522 void *context),
4523 void *context)
b84876c2 4524{
b84876c2
PA
4525 if (callback != NULL)
4526 {
4527 async_client_callback = callback;
4528 async_client_context = context;
7feb7d06
PA
4529 if (!linux_async_pipe (1))
4530 {
4531 add_file_handler (linux_nat_event_pipe[0],
4532 handle_target_event, NULL);
4533 /* There may be pending events to handle. Tell the event loop
4534 to poll them. */
4535 async_file_mark ();
4536 }
b84876c2
PA
4537 }
4538 else
4539 {
4540 async_client_callback = callback;
4541 async_client_context = context;
b84876c2 4542 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 4543 linux_async_pipe (0);
b84876c2
PA
4544 }
4545 return;
4546}
4547
/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
   event came out.  Callback for iterate_over_lwps; always returns 0
   so iteration continues over all LWPs.  */

static int
linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
{
  if (!lwp->stopped)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNSL: running -> suspending %s\n",
			    target_pid_to_str (lwp->ptid));


      /* If a stop was already requested for this LWP, don't send
	 another; just wait for the first one to be reported.  */
      if (lwp->last_resume_kind == resume_stop)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"linux-nat: already stopping LWP %ld at "
				"GDB's request\n",
				ptid_get_lwp (lwp->ptid));
	  return 0;
	}

      stop_callback (lwp, NULL);
      lwp->last_resume_kind = resume_stop;
    }
  else
    {
      /* Already known to be stopped; do nothing.  */

      if (debug_linux_nat)
	{
	  if (find_thread_ptid (lwp->ptid)->stop_requested)
	    fprintf_unfiltered (gdb_stdlog,
				"LNSL: already stopped/stop_requested %s\n",
				target_pid_to_str (lwp->ptid));
	  else
	    fprintf_unfiltered (gdb_stdlog,
				"LNSL: already stopped/no "
				"stop_requested yet %s\n",
				target_pid_to_str (lwp->ptid));
	}
    }
  return 0;
}
4594
/* Implementation of the "to_stop" target_ops method.  In non-stop
   mode stop each matching LWP individually; in all-stop mode defer
   to the single-threaded layer below.  */

static void
linux_nat_stop (struct target_ops *self, ptid_t ptid)
{
  if (non_stop)
    iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
  else
    linux_ops->to_stop (linux_ops, ptid);
}
4603
/* Implementation of the "to_close" target_ops method.  */

static void
linux_nat_close (struct target_ops *self)
{
  /* Unregister from the event loop.  */
  if (linux_nat_is_async_p (self))
    linux_nat_async (self, NULL, NULL);

  /* Chain to the saved single-threaded target's close method, if it
     has one.  */
  if (linux_ops->to_close)
    linux_ops->to_close (linux_ops);

  super_close (self);
}
4616
c0694254
PA
4617/* When requests are passed down from the linux-nat layer to the
4618 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4619 used. The address space pointer is stored in the inferior object,
4620 but the common code that is passed such ptid can't tell whether
4621 lwpid is a "main" process id or not (it assumes so). We reverse
4622 look up the "main" process id from the lwp here. */
4623
70221824 4624static struct address_space *
c0694254
PA
4625linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
4626{
4627 struct lwp_info *lwp;
4628 struct inferior *inf;
4629 int pid;
4630
dfd4cc63 4631 if (ptid_get_lwp (ptid) == 0)
c0694254
PA
4632 {
4633 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4634 tgid. */
4635 lwp = find_lwp_pid (ptid);
dfd4cc63 4636 pid = ptid_get_pid (lwp->ptid);
c0694254
PA
4637 }
4638 else
4639 {
4640 /* A (pid,lwpid,0) ptid. */
dfd4cc63 4641 pid = ptid_get_pid (ptid);
c0694254
PA
4642 }
4643
4644 inf = find_inferior_pid (pid);
4645 gdb_assert (inf != NULL);
4646 return inf->aspace;
4647}
4648
dc146f7c
VP
4649/* Return the cached value of the processor core for thread PTID. */
4650
70221824 4651static int
dc146f7c
VP
4652linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
4653{
4654 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 4655
dc146f7c
VP
4656 if (info)
4657 return info->core;
4658 return -1;
4659}
4660
/* Install the GNU/Linux multi-threaded method overrides on top of
   the single-threaded target T, and register the result.  */

void
linux_nat_add_target (struct target_ops *t)
{
  /* Save the provided single-threaded target.  We save this in a separate
     variable because another target we've inherited from (e.g. inf-ptrace)
     may have saved a pointer to T; we want to use it for the final
     process stratum target.  */
  linux_ops_saved = *t;
  linux_ops = &linux_ops_saved;

  /* Override some methods for multithreading.  */
  t->to_create_inferior = linux_nat_create_inferior;
  t->to_attach = linux_nat_attach;
  t->to_detach = linux_nat_detach;
  t->to_resume = linux_nat_resume;
  t->to_wait = linux_nat_wait;
  t->to_pass_signals = linux_nat_pass_signals;
  t->to_xfer_partial = linux_nat_xfer_partial;
  t->to_kill = linux_nat_kill;
  t->to_mourn_inferior = linux_nat_mourn_inferior;
  t->to_thread_alive = linux_nat_thread_alive;
  t->to_pid_to_str = linux_nat_pid_to_str;
  t->to_thread_name = linux_nat_thread_name;
  t->to_has_thread_control = tc_schedlock;
  t->to_thread_address_space = linux_nat_thread_address_space;
  t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
  t->to_stopped_data_address = linux_nat_stopped_data_address;

  /* Async/event-loop support.  */
  t->to_can_async_p = linux_nat_can_async_p;
  t->to_is_async_p = linux_nat_is_async_p;
  t->to_supports_non_stop = linux_nat_supports_non_stop;
  t->to_async = linux_nat_async;
  t->to_terminal_inferior = linux_nat_terminal_inferior;
  t->to_terminal_ours = linux_nat_terminal_ours;

  /* Remember the inherited close method; linux_nat_close chains to
     it.  */
  super_close = t->to_close;
  t->to_close = linux_nat_close;

  /* Methods for non-stop support.  */
  t->to_stop = linux_nat_stop;

  t->to_supports_multi_process = linux_nat_supports_multi_process;

  t->to_supports_disable_randomization
    = linux_nat_supports_disable_randomization;

  t->to_core_of_thread = linux_nat_core_of_thread;

  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will set at thread_stratum.  This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes.  */

  add_target (t);
}
4717
9f0bdab8
DJ
4718/* Register a method to call whenever a new thread is attached. */
4719void
7b50312a
PA
4720linux_nat_set_new_thread (struct target_ops *t,
4721 void (*new_thread) (struct lwp_info *))
9f0bdab8
DJ
4722{
4723 /* Save the pointer. We only support a single registered instance
4724 of the GNU/Linux native target, so we do not need to map this to
4725 T. */
4726 linux_nat_new_thread = new_thread;
4727}
4728
26cb8b7c
PA
4729/* See declaration in linux-nat.h. */
4730
4731void
4732linux_nat_set_new_fork (struct target_ops *t,
4733 linux_nat_new_fork_ftype *new_fork)
4734{
4735 /* Save the pointer. */
4736 linux_nat_new_fork = new_fork;
4737}
4738
4739/* See declaration in linux-nat.h. */
4740
4741void
4742linux_nat_set_forget_process (struct target_ops *t,
4743 linux_nat_forget_process_ftype *fn)
4744{
4745 /* Save the pointer. */
4746 linux_nat_forget_process_hook = fn;
4747}
4748
4749/* See declaration in linux-nat.h. */
4750
4751void
4752linux_nat_forget_process (pid_t pid)
4753{
4754 if (linux_nat_forget_process_hook != NULL)
4755 linux_nat_forget_process_hook (pid);
4756}
4757
5b009018
PA
4758/* Register a method that converts a siginfo object between the layout
4759 that ptrace returns, and the layout in the architecture of the
4760 inferior. */
4761void
4762linux_nat_set_siginfo_fixup (struct target_ops *t,
a5362b9a 4763 int (*siginfo_fixup) (siginfo_t *,
5b009018
PA
4764 gdb_byte *,
4765 int))
4766{
4767 /* Save the pointer. */
4768 linux_nat_siginfo_fixup = siginfo_fixup;
4769}
4770
7b50312a
PA
4771/* Register a method to call prior to resuming a thread. */
4772
4773void
4774linux_nat_set_prepare_to_resume (struct target_ops *t,
4775 void (*prepare_to_resume) (struct lwp_info *))
4776{
4777 /* Save the pointer. */
4778 linux_nat_prepare_to_resume = prepare_to_resume;
4779}
4780
f865ee35
JK
4781/* See linux-nat.h. */
4782
4783int
4784linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
9f0bdab8 4785{
da559b09 4786 int pid;
9f0bdab8 4787
dfd4cc63 4788 pid = ptid_get_lwp (ptid);
da559b09 4789 if (pid == 0)
dfd4cc63 4790 pid = ptid_get_pid (ptid);
f865ee35 4791
da559b09
JK
4792 errno = 0;
4793 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
4794 if (errno != 0)
4795 {
4796 memset (siginfo, 0, sizeof (*siginfo));
4797 return 0;
4798 }
f865ee35 4799 return 1;
9f0bdab8
DJ
4800}
4801
2c0b251b
PA
4802/* Provide a prototype to silence -Wmissing-prototypes. */
4803extern initialize_file_ftype _initialize_linux_nat;
4804
d6b0e80f
AC
/* Module initializer: registers the "set/show debug lin-lwp" command,
   installs the SIGCHLD handler, and prepares the signal masks used by
   the event loop.  Note the ordering matters: the pre-handler mask is
   snapshotted into normal_mask before sigaction is called, and
   suspend_mask is snapshotted afterwards.  */

void
_initialize_linux_nat (void)
{
  /* "set debug lin-lwp N" toggles this module's debug output.  */
  add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
			     &debug_linux_nat, _("\
Set debugging of GNU/Linux lwp module."), _("\
Show debugging of GNU/Linux lwp module."), _("\
Enables printf debugging output."),
			     NULL,
			     show_debug_linux_nat,
			     &setdebuglist, &showdebuglist);

  /* Save this mask as the default.  (Query-only sigprocmask: the
     current mask is read into normal_mask without changing it.)  */
  sigprocmask (SIG_SETMASK, NULL, &normal_mask);

  /* Install a SIGCHLD handler.  SA_RESTART keeps interrupted
     syscalls from failing with EINTR.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;

  /* Make it the default.  */
  sigaction (SIGCHLD, &sigchld_action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  The
     current mask is read into suspend_mask and SIGCHLD removed from
     that copy only.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  /* Start with no signals recorded as blocked by this module.  */
  sigemptyset (&blocked_mask);

  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to
     support read-only process state.  */
  linux_ptrace_set_additional_flags (PTRACE_O_TRACESYSGOOD
				     | PTRACE_O_TRACEVFORKDONE
				     | PTRACE_O_TRACEVFORK
				     | PTRACE_O_TRACEFORK
				     | PTRACE_O_TRACEEXEC);
}
4842\f
4843
4844/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4845 the GNU/Linux Threads library and therefore doesn't really belong
4846 here. */
4847
4848/* Read variable NAME in the target and return its value if found.
4849 Otherwise return zero. It is assumed that the type of the variable
4850 is `int'. */
4851
4852static int
4853get_signo (const char *name)
4854{
3b7344d5 4855 struct bound_minimal_symbol ms;
d6b0e80f
AC
4856 int signo;
4857
4858 ms = lookup_minimal_symbol (name, NULL, NULL);
3b7344d5 4859 if (ms.minsym == NULL)
d6b0e80f
AC
4860 return 0;
4861
77e371c0 4862 if (target_read_memory (BMSYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
4863 sizeof (signo)) != 0)
4864 return 0;
4865
4866 return signo;
4867}
4868
4869/* Return the set of signals used by the threads library in *SET. */
4870
4871void
4872lin_thread_get_thread_signals (sigset_t *set)
4873{
4874 struct sigaction action;
4875 int restart, cancel;
4876
b84876c2 4877 sigemptyset (&blocked_mask);
d6b0e80f
AC
4878 sigemptyset (set);
4879
4880 restart = get_signo ("__pthread_sig_restart");
17fbb0bd
DJ
4881 cancel = get_signo ("__pthread_sig_cancel");
4882
4883 /* LinuxThreads normally uses the first two RT signals, but in some legacy
4884 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
4885 not provide any way for the debugger to query the signal numbers -
4886 fortunately they don't change! */
4887
d6b0e80f 4888 if (restart == 0)
17fbb0bd 4889 restart = __SIGRTMIN;
d6b0e80f 4890
d6b0e80f 4891 if (cancel == 0)
17fbb0bd 4892 cancel = __SIGRTMIN + 1;
d6b0e80f
AC
4893
4894 sigaddset (set, restart);
4895 sigaddset (set, cancel);
4896
4897 /* The GNU/Linux Threads library makes terminating threads send a
4898 special "cancel" signal instead of SIGCHLD. Make sure we catch
4899 those (to prevent them from terminating GDB itself, which is
4900 likely to be their default action) and treat them the same way as
4901 SIGCHLD. */
4902
4903 action.sa_handler = sigchld_handler;
4904 sigemptyset (&action.sa_mask);
58aecb61 4905 action.sa_flags = SA_RESTART;
d6b0e80f
AC
4906 sigaction (cancel, &action, NULL);
4907
4908 /* We block the "cancel" signal throughout this code ... */
4909 sigaddset (&blocked_mask, cancel);
4910 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
4911
4912 /* ... except during a sigsuspend. */
4913 sigdelset (&suspend_mask, cancel);
4914}