/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "infrun.h"
#include "target.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-child.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/procfs.h>		/* for elf_gregset etc.  */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include <sys/stat.h>		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include "xml-support.h"
#include <sys/vfs.h>
#include "solib.h"
#include "nat/linux-osdata.h"
#include "linux-tdep.h"
#include "symfile.h"
#include "agent.h"
#include "tracepoint.h"
#include "buffer.h"
#include "target-descriptions.h"
#include "filestuff.h"
#include "objfiles.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif /* HAVE_PERSONALITY */

/* This comment documents the high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good.
Prior to version 2.4, Linux can either wait for events in the main
thread, or in secondary threads.  (2.4 has the __WALL flag).  So, if
we use blocking waitpid, we might miss an event.  The solution is to
use non-blocking waitpid, together with sigsuspend.  First, we use
non-blocking waitpid to get an event in the main process, if any.
Second, we use non-blocking waitpid with the __WCLONE flag to check
for events in cloned processes.  If nothing is found, we use
sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives, it means
something happened to a child process -- and SIGCHLD will be
delivered both for events in the main debugged process and in cloned
processes.  As soon as we know there's an event, we get back to
calling non-blocking waitpid with and without __WCLONE.

Note that SIGCHLD should be blocked between waitpid and sigsuspend
calls, so that we don't miss a signal.  If SIGCHLD arrives in
between, when it's blocked, the signal becomes pending and sigsuspend
immediately notices it and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well on
other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop of a target event,
it's bad.  OTOH, if we notify the event loop when there's no event
from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of
another signal is not entirely significant; we just need a signal to
be delivered, so that we can intercept it.  SIGSTOP's advantage is
that it can not be blocked.  A disadvantage is that it is not a
real-time signal, so it can only be queued once; we do not keep track
of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But
we can't use them, because they have special behavior when the signal
is generated - not when it is delivered.  SIGCONT resumes the entire
thread group and SIGKILL kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the
thread we tkill'd.  But we never let the SIGSTOP be delivered; we
always intercept and cancel it (by PTRACE_CONT without passing
SIGSTOP).

We could use a real-time signal instead.  This would solve those
problems; we could use PTRACE_GETSIGINFO to locate the specific stop
signals sent by GDB.  But we would still have to have some support
for SIGSTOP, since PTRACE_ATTACH generates it, and there are races
with trying to find a signal that is not blocked.  */

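/* The sync-mode scheme above can be sketched roughly as follows.
   This is an illustrative outline only, kept out of the build with
   "#if 0"; it uses plain waitpid/sigsuspend rather than the wrappers
   this file really uses, and assumes SIGCHLD is already blocked with
   OLDMASK holding the mask to use while suspended.  */
#if 0
static int
sync_wait_sketch (pid_t main_pid, int *status, const sigset_t *oldmask)
{
  for (;;)
    {
      pid_t ret;

      /* Check the main process without blocking.  */
      ret = waitpid (main_pid, status, WNOHANG);
      if (ret > 0)
	return ret;

      /* Check cloned processes (threads) without blocking.  */
      ret = waitpid (-1, status, __WCLONE | WNOHANG);
      if (ret > 0)
	return ret;

      /* Nothing found; wait for SIGCHLD.  A SIGCHLD that arrived
	 while blocked is pending, so sigsuspend returns at once.  */
      sigsuspend (oldmask);
    }
}
#endif
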
#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (struct lwp_info *);

/* The method to call, if any, when a new fork is attached.  */
static linux_nat_new_fork_ftype *linux_nat_new_fork;

/* The method to call, if any, when a process is no longer
   attached.  */
static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;

/* Hook to call prior to resuming a thread.  */
static void (*linux_nat_prepare_to_resume) (struct lwp_info *);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (siginfo_t *,
				       gdb_byte *,
				       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static target_xfer_partial_ftype *super_xfer_partial;

/* The saved to_close method, inherited from inf-ptrace.c.
   Called by our to_close.  */
static void (*super_close) (struct target_ops *);

static unsigned int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Async mode support.  */

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in event
   pipe, so that the select/poll in the event-loop realizes we have
   something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long as we
     end up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}

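/* Illustrative sketch of the self-pipe trick described at the top of
   the file: a SIGCHLD handler that only pokes the event loop via
   async_file_mark, leaving the actual waitpid work to the normal
   wait path.  Outline only ("#if 0"), not the handler this file
   installs.  */
#if 0
static void
sigchld_handler_sketch (int signo)
{
  int old_errno = errno;

  /* write(2) is async-signal-safe, so marking the pipe from a signal
     handler is fine.  */
  async_file_mark ();

  errno = old_errno;
}
#endif
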
static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp, void *data);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static void delete_lwp (ptid_t ptid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);

/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
in_pid_list_p (struct simple_pid_list *list, int pid)
{
  struct simple_pid_list *p;

  for (p = list; p != NULL; p = p->next)
    if (p->pid == pid)
      return 1;
  return 0;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

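/* Illustrative sketch of how the list above is used: record a stop
   that arrived before we were ready for it, and claim it back later.
   Outline only ("#if 0").  */
#if 0
static void
stopped_pids_usage_sketch (int lwpid, int status)
{
  int cached_status;

  /* A new clone stopped before we had processed its creation event;
     remember the stop.  */
  add_to_pid_list (&stopped_pids, lwpid, status);

  /* Later, when we are ready to handle that LWP, pick the stop back
     up (this also removes the entry from the list).  */
  if (pull_pid_from_list (&stopped_pids, lwpid, &cached_status))
    {
      /* cached_status now holds the waitpid status saved above.  */
    }
}
#endif
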
96d7229d 323/* Initialize ptrace warnings and check for supported ptrace
beed38b8
JB
324 features given PID.
325
326 ATTACHED should be nonzero iff we attached to the inferior. */
3993f6b1
DJ
327
328static void
beed38b8 329linux_init_ptrace (pid_t pid, int attached)
3993f6b1 330{
beed38b8 331 linux_enable_event_reporting (pid, attached);
96d7229d 332 linux_ptrace_init_warnings ();
4de4c07c
DJ
333}
334
6d8fd2b7 335static void
f045800c 336linux_child_post_attach (struct target_ops *self, int pid)
4de4c07c 337{
beed38b8 338 linux_init_ptrace (pid, 1);
4de4c07c
DJ
339}
340
10d6c8cd 341static void
2e97a79e 342linux_child_post_startup_inferior (struct target_ops *self, ptid_t ptid)
4de4c07c 343{
beed38b8 344 linux_init_ptrace (ptid_get_pid (ptid), 0);
4de4c07c
DJ
345}
346
4403d8e9
JK
347/* Return the number of known LWPs in the tgid given by PID. */
348
349static int
350num_lwps (int pid)
351{
352 int count = 0;
353 struct lwp_info *lp;
354
355 for (lp = lwp_list; lp; lp = lp->next)
356 if (ptid_get_pid (lp->ptid) == pid)
357 count++;
358
359 return count;
360}
361
362/* Call delete_lwp with prototype compatible for make_cleanup. */
363
364static void
365delete_lwp_cleanup (void *lp_voidp)
366{
367 struct lwp_info *lp = lp_voidp;
368
369 delete_lwp (lp->ptid);
370}
371
d83ad864
DB
372/* Target hook for follow_fork. On entry inferior_ptid must be the
373 ptid of the followed inferior. At return, inferior_ptid will be
374 unchanged. */
375
6d8fd2b7 376static int
07107ca6
LM
377linux_child_follow_fork (struct target_ops *ops, int follow_child,
378 int detach_fork)
3993f6b1 379{
d83ad864 380 if (!follow_child)
4de4c07c 381 {
6c95b8df 382 struct lwp_info *child_lp = NULL;
d83ad864
DB
383 int status = W_STOPCODE (0);
384 struct cleanup *old_chain;
385 int has_vforked;
386 int parent_pid, child_pid;
387
388 has_vforked = (inferior_thread ()->pending_follow.kind
389 == TARGET_WAITKIND_VFORKED);
390 parent_pid = ptid_get_lwp (inferior_ptid);
391 if (parent_pid == 0)
392 parent_pid = ptid_get_pid (inferior_ptid);
393 child_pid
394 = ptid_get_pid (inferior_thread ()->pending_follow.value.related_pid);
395
4de4c07c 396
1777feb0 397 /* We're already attached to the parent, by default. */
d83ad864
DB
398 old_chain = save_inferior_ptid ();
399 inferior_ptid = ptid_build (child_pid, child_pid, 0);
400 child_lp = add_lwp (inferior_ptid);
401 child_lp->stopped = 1;
402 child_lp->last_resume_kind = resume_stop;
4de4c07c 403
ac264b3b
MS
404 /* Detach new forked process? */
405 if (detach_fork)
f75c00e4 406 {
4403d8e9
JK
407 make_cleanup (delete_lwp_cleanup, child_lp);
408
4403d8e9
JK
409 if (linux_nat_prepare_to_resume != NULL)
410 linux_nat_prepare_to_resume (child_lp);
c077881a
HZ
411
412 /* When debugging an inferior in an architecture that supports
413 hardware single stepping on a kernel without commit
414 6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
415 process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
416 set if the parent process had them set.
417 To work around this, single step the child process
418 once before detaching to clear the flags. */
419
420 if (!gdbarch_software_single_step_p (target_thread_architecture
421 (child_lp->ptid)))
422 {
c077881a
HZ
423 linux_disable_event_reporting (child_pid);
424 if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
425 perror_with_name (_("Couldn't do single step"));
426 if (my_waitpid (child_pid, &status, 0) < 0)
427 perror_with_name (_("Couldn't wait vfork process"));
428 }
429
430 if (WIFSTOPPED (status))
9caaaa83
PA
431 {
432 int signo;
433
434 signo = WSTOPSIG (status);
435 if (signo != 0
436 && !signal_pass_state (gdb_signal_from_host (signo)))
437 signo = 0;
438 ptrace (PTRACE_DETACH, child_pid, 0, signo);
439 }
4403d8e9 440
d83ad864 441 /* Resets value of inferior_ptid to parent ptid. */
4403d8e9 442 do_cleanups (old_chain);
ac264b3b
MS
443 }
444 else
445 {
6c95b8df 446 /* Let the thread_db layer learn about this new process. */
2277426b 447 check_for_thread_db ();
ac264b3b 448 }
9016a515 449
d83ad864
DB
450 do_cleanups (old_chain);
451
9016a515
DJ
452 if (has_vforked)
453 {
3ced3da4 454 struct lwp_info *parent_lp;
6c95b8df 455
3ced3da4 456 parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
96d7229d 457 gdb_assert (linux_supports_tracefork () >= 0);
3ced3da4 458
96d7229d 459 if (linux_supports_tracevforkdone ())
9016a515 460 {
6c95b8df
PA
461 if (debug_linux_nat)
462 fprintf_unfiltered (gdb_stdlog,
463 "LCFF: waiting for VFORK_DONE on %d\n",
464 parent_pid);
3ced3da4 465 parent_lp->stopped = 1;
9016a515 466
6c95b8df
PA
467 /* We'll handle the VFORK_DONE event like any other
468 event, in target_wait. */
9016a515
DJ
469 }
470 else
471 {
472 /* We can't insert breakpoints until the child has
473 finished with the shared memory region. We need to
474 wait until that happens. Ideal would be to just
475 call:
476 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
477 - waitpid (parent_pid, &status, __WALL);
478 However, most architectures can't handle a syscall
479 being traced on the way out if it wasn't traced on
480 the way in.
481
482 We might also think to loop, continuing the child
483 until it exits or gets a SIGTRAP. One problem is
484 that the child might call ptrace with PTRACE_TRACEME.
485
486 There's no simple and reliable way to figure out when
487 the vforked child will be done with its copy of the
488 shared memory. We could step it out of the syscall,
489 two instructions, let it go, and then single-step the
490 parent once. When we have hardware single-step, this
491 would work; with software single-step it could still
492 be made to work but we'd have to be able to insert
493 single-step breakpoints in the child, and we'd have
494 to insert -just- the single-step breakpoint in the
495 parent. Very awkward.
496
497 In the end, the best we can do is to make sure it
498 runs for a little while. Hopefully it will be out of
499 range of any breakpoints we reinsert. Usually this
500 is only the single-step breakpoint at vfork's return
501 point. */
502
6c95b8df
PA
503 if (debug_linux_nat)
504 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
505 "LCFF: no VFORK_DONE "
506 "support, sleeping a bit\n");
6c95b8df 507
9016a515 508 usleep (10000);
9016a515 509
6c95b8df
PA
510 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
511 and leave it pending. The next linux_nat_resume call
512 will notice a pending event, and bypasses actually
513 resuming the inferior. */
3ced3da4
PA
514 parent_lp->status = 0;
515 parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
516 parent_lp->stopped = 1;
6c95b8df
PA
517
518 /* If we're in async mode, need to tell the event loop
519 there's something here to process. */
520 if (target_can_async_p ())
521 async_file_mark ();
522 }
9016a515 523 }
4de4c07c 524 }
3993f6b1 525 else
4de4c07c 526 {
3ced3da4 527 struct lwp_info *child_lp;
4de4c07c 528
3ced3da4
PA
529 child_lp = add_lwp (inferior_ptid);
530 child_lp->stopped = 1;
25289eb2 531 child_lp->last_resume_kind = resume_stop;
6c95b8df 532
6c95b8df 533 /* Let the thread_db layer learn about this new process. */
ef29ce1a 534 check_for_thread_db ();
4de4c07c
DJ
535 }
536
537 return 0;
538}
539
4de4c07c 540\f
77b06cd7 541static int
a863b201 542linux_child_insert_fork_catchpoint (struct target_ops *self, int pid)
4de4c07c 543{
96d7229d 544 return !linux_supports_tracefork ();
3993f6b1
DJ
545}
546
eb73ad13 547static int
973fc227 548linux_child_remove_fork_catchpoint (struct target_ops *self, int pid)
eb73ad13
PA
549{
550 return 0;
551}
552
77b06cd7 553static int
3ecc7da0 554linux_child_insert_vfork_catchpoint (struct target_ops *self, int pid)
3993f6b1 555{
96d7229d 556 return !linux_supports_tracefork ();
3993f6b1
DJ
557}
558
eb73ad13 559static int
e98cf0cd 560linux_child_remove_vfork_catchpoint (struct target_ops *self, int pid)
eb73ad13
PA
561{
562 return 0;
563}
564
77b06cd7 565static int
ba025e51 566linux_child_insert_exec_catchpoint (struct target_ops *self, int pid)
3993f6b1 567{
96d7229d 568 return !linux_supports_tracefork ();
3993f6b1
DJ
569}
570
eb73ad13 571static int
758e29d2 572linux_child_remove_exec_catchpoint (struct target_ops *self, int pid)
eb73ad13
PA
573{
574 return 0;
575}
576
a96d9b2e 577static int
ff214e67
TT
578linux_child_set_syscall_catchpoint (struct target_ops *self,
579 int pid, int needed, int any_count,
a96d9b2e
SDJ
580 int table_size, int *table)
581{
96d7229d 582 if (!linux_supports_tracesysgood ())
77b06cd7
TJB
583 return 1;
584
a96d9b2e
SDJ
585 /* On GNU/Linux, we ignore the arguments. It means that we only
586 enable the syscall catchpoints, but do not disable them.
77b06cd7 587
a96d9b2e
SDJ
588 Also, we do not use the `table' information because we do not
589 filter system calls here. We let GDB do the logic for us. */
590 return 0;
591}
592
/* On GNU/Linux there are no real LWPs.  The closest thing to LWPs
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     when debugged, a multi-threaded process that spawns a lot of
     threads will run out of processes, even if the threads exit,
     because the "zombies" stay around.  */

/* List of known LWPs.  */
struct lwp_info *lwp_list;
d6b0e80f
AC
628\f
629
d6b0e80f
AC
630/* Original signal mask. */
631static sigset_t normal_mask;
632
633/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
634 _initialize_linux_nat. */
635static sigset_t suspend_mask;
636
7feb7d06
PA
637/* Signals to block to make that sigsuspend work. */
638static sigset_t blocked_mask;
639
640/* SIGCHLD action. */
641struct sigaction sigchld_action;
b84876c2 642
7feb7d06
PA
643/* Block child signals (SIGCHLD and linux threads signals), and store
644 the previous mask in PREV_MASK. */
84e46146 645
7feb7d06
PA
646static void
647block_child_signals (sigset_t *prev_mask)
648{
649 /* Make sure SIGCHLD is blocked. */
650 if (!sigismember (&blocked_mask, SIGCHLD))
651 sigaddset (&blocked_mask, SIGCHLD);
652
653 sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
654}
655
656/* Restore child signals mask, previously returned by
657 block_child_signals. */
658
659static void
660restore_child_signals_mask (sigset_t *prev_mask)
661{
662 sigprocmask (SIG_SETMASK, prev_mask, NULL);
663}
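
/* Illustrative sketch of the intended pairing: block SIGCHLD around
   a critical region, then restore the caller's original mask.
   Outline only ("#if 0").  */
#if 0
static void
blocked_region_sketch (void)
{
  sigset_t prev_mask;

  block_child_signals (&prev_mask);

  /* ... code that must not be interrupted by SIGCHLD, e.g. a
     waitpid/sigsuspend sequence ...  */

  restore_child_signals_mask (&prev_mask);
}
#endif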
2455069d
UW
664
665/* Mask of signals to pass directly to the inferior. */
666static sigset_t pass_mask;
667
668/* Update signals to pass to the inferior. */
669static void
94bedb42
TT
670linux_nat_pass_signals (struct target_ops *self,
671 int numsigs, unsigned char *pass_signals)
2455069d
UW
672{
673 int signo;
674
675 sigemptyset (&pass_mask);
676
677 for (signo = 1; signo < NSIG; signo++)
678 {
2ea28649 679 int target_signo = gdb_signal_from_host (signo);
2455069d
UW
680 if (target_signo < numsigs && pass_signals[target_signo])
681 sigaddset (&pass_mask, signo);
682 }
683}
684
d6b0e80f
AC
685\f
686
687/* Prototypes for local functions. */
688static int stop_wait_callback (struct lwp_info *lp, void *data);
28439f5e 689static int linux_thread_alive (ptid_t ptid);
8dd27370 690static char *linux_child_pid_to_exec_file (struct target_ops *self, int pid);
710151dd 691
d6b0e80f 692\f
d6b0e80f 693
7b50312a
PA
694/* Destroy and free LP. */
695
696static void
697lwp_free (struct lwp_info *lp)
698{
699 xfree (lp->arch_private);
700 xfree (lp);
701}
702
d90e17a7
PA
703/* Remove all LWPs belong to PID from the lwp list. */
704
705static void
706purge_lwp_list (int pid)
707{
708 struct lwp_info *lp, *lpprev, *lpnext;
709
710 lpprev = NULL;
711
712 for (lp = lwp_list; lp; lp = lpnext)
713 {
714 lpnext = lp->next;
715
716 if (ptid_get_pid (lp->ptid) == pid)
717 {
718 if (lp == lwp_list)
719 lwp_list = lp->next;
720 else
721 lpprev->next = lp->next;
722
7b50312a 723 lwp_free (lp);
d90e17a7
PA
724 }
725 else
726 lpprev = lp;
727 }
728}
729
26cb8b7c
PA
730/* Add the LWP specified by PTID to the list. PTID is the first LWP
731 in the process. Return a pointer to the structure describing the
732 new LWP.
733
734 This differs from add_lwp in that we don't let the arch specific
735 bits know about this new thread. Current clients of this callback
736 take the opportunity to install watchpoints in the new thread, and
737 we shouldn't do that for the first thread. If we're spawning a
738 child ("run"), the thread executes the shell wrapper first, and we
739 shouldn't touch it until it execs the program we want to debug.
740 For "attach", it'd be okay to call the callback, but it's not
741 necessary, because watchpoints can't yet have been inserted into
742 the inferior. */
d6b0e80f
AC
743
744static struct lwp_info *
26cb8b7c 745add_initial_lwp (ptid_t ptid)
d6b0e80f
AC
746{
747 struct lwp_info *lp;
748
dfd4cc63 749 gdb_assert (ptid_lwp_p (ptid));
d6b0e80f
AC
750
751 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
752
753 memset (lp, 0, sizeof (struct lwp_info));
754
25289eb2 755 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
756 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
757
758 lp->ptid = ptid;
dc146f7c 759 lp->core = -1;
d6b0e80f
AC
760
761 lp->next = lwp_list;
762 lwp_list = lp;
d6b0e80f 763
26cb8b7c
PA
764 return lp;
765}
766
767/* Add the LWP specified by PID to the list. Return a pointer to the
768 structure describing the new LWP. The LWP should already be
769 stopped. */
770
771static struct lwp_info *
772add_lwp (ptid_t ptid)
773{
774 struct lwp_info *lp;
775
776 lp = add_initial_lwp (ptid);
777
6e012a6c
PA
778 /* Let the arch specific bits know about this new thread. Current
779 clients of this callback take the opportunity to install
26cb8b7c
PA
780 watchpoints in the new thread. We don't do this for the first
781 thread though. See add_initial_lwp. */
782 if (linux_nat_new_thread != NULL)
7b50312a 783 linux_nat_new_thread (lp);
9f0bdab8 784
d6b0e80f
AC
785 return lp;
786}
787
788/* Remove the LWP specified by PID from the list. */
789
790static void
791delete_lwp (ptid_t ptid)
792{
793 struct lwp_info *lp, *lpprev;
794
795 lpprev = NULL;
796
797 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
798 if (ptid_equal (lp->ptid, ptid))
799 break;
800
801 if (!lp)
802 return;
803
d6b0e80f
AC
804 if (lpprev)
805 lpprev->next = lp->next;
806 else
807 lwp_list = lp->next;
808
7b50312a 809 lwp_free (lp);
d6b0e80f
AC
810}
811
812/* Return a pointer to the structure describing the LWP corresponding
813 to PID. If no corresponding LWP could be found, return NULL. */
814
815static struct lwp_info *
816find_lwp_pid (ptid_t ptid)
817{
818 struct lwp_info *lp;
819 int lwp;
820
dfd4cc63
LM
821 if (ptid_lwp_p (ptid))
822 lwp = ptid_get_lwp (ptid);
d6b0e80f 823 else
dfd4cc63 824 lwp = ptid_get_pid (ptid);
d6b0e80f
AC
825
826 for (lp = lwp_list; lp; lp = lp->next)
dfd4cc63 827 if (lwp == ptid_get_lwp (lp->ptid))
d6b0e80f
AC
828 return lp;
829
830 return NULL;
831}
832
833/* Call CALLBACK with its second argument set to DATA for every LWP in
834 the list. If CALLBACK returns 1 for a particular LWP, return a
835 pointer to the structure describing that LWP immediately.
836 Otherwise return NULL. */
837
838struct lwp_info *
d90e17a7
PA
839iterate_over_lwps (ptid_t filter,
840 int (*callback) (struct lwp_info *, void *),
841 void *data)
d6b0e80f
AC
842{
843 struct lwp_info *lp, *lpnext;
844
845 for (lp = lwp_list; lp; lp = lpnext)
846 {
847 lpnext = lp->next;
d90e17a7
PA
848
849 if (ptid_match (lp->ptid, filter))
850 {
851 if ((*callback) (lp, data))
852 return lp;
853 }
d6b0e80f
AC
854 }
855
856 return NULL;
857}
858
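/* Illustrative sketch of using iterate_over_lwps: a callback that
   counts the LWPs matching FILTER.  Outline only ("#if 0").  */
#if 0
static int
count_lwp_callback (struct lwp_info *lp, void *data)
{
  int *count = data;

  (*count)++;
  return 0;			/* Return 0 to keep iterating.  */
}

static int
count_lwps_sketch (ptid_t filter)
{
  int count = 0;

  iterate_over_lwps (filter, count_lwp_callback, &count);
  return count;
}
#endif
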
2277426b
PA
859/* Update our internal state when changing from one checkpoint to
860 another indicated by NEW_PTID. We can only switch single-threaded
861 applications, so we only create one new LWP, and the previous list
862 is discarded. */
f973ed9c
DJ
863
864void
865linux_nat_switch_fork (ptid_t new_ptid)
866{
867 struct lwp_info *lp;
868
dfd4cc63 869 purge_lwp_list (ptid_get_pid (inferior_ptid));
2277426b 870
f973ed9c
DJ
871 lp = add_lwp (new_ptid);
872 lp->stopped = 1;
e26af52f 873
2277426b
PA
874 /* This changes the thread's ptid while preserving the gdb thread
875 num. Also changes the inferior pid, while preserving the
876 inferior num. */
877 thread_change_ptid (inferior_ptid, new_ptid);
878
879 /* We've just told GDB core that the thread changed target id, but,
880 in fact, it really is a different thread, with different register
881 contents. */
882 registers_changed ();
e26af52f
DJ
883}
884
e26af52f
DJ
885/* Handle the exit of a single thread LP. */
886
887static void
888exit_lwp (struct lwp_info *lp)
889{
e09875d4 890 struct thread_info *th = find_thread_ptid (lp->ptid);
063bfe2e
VP
891
892 if (th)
e26af52f 893 {
17faa917
DJ
894 if (print_thread_events)
895 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
896
4f8d22e3 897 delete_thread (lp->ptid);
e26af52f
DJ
898 }
899
900 delete_lwp (lp->ptid);
901}
902
a0ef4274
DJ
903/* Wait for the LWP specified by LP, which we have just attached to.
904 Returns a wait status for that LWP, to cache. */
905
906static int
907linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
908 int *signalled)
909{
dfd4cc63 910 pid_t new_pid, pid = ptid_get_lwp (ptid);
a0ef4274
DJ
911 int status;
912
644cebc9 913 if (linux_proc_pid_is_stopped (pid))
a0ef4274
DJ
914 {
915 if (debug_linux_nat)
916 fprintf_unfiltered (gdb_stdlog,
917 "LNPAW: Attaching to a stopped process\n");
918
919 /* The process is definitely stopped. It is in a job control
920 stop, unless the kernel predates the TASK_STOPPED /
921 TASK_TRACED distinction, in which case it might be in a
922 ptrace stop. Make sure it is in a ptrace stop; from there we
923 can kill it, signal it, et cetera.
924
925 First make sure there is a pending SIGSTOP. Since we are
926 already attached, the process can not transition from stopped
927 to running without a PTRACE_CONT; so we know this signal will
928 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
929 probably already in the queue (unless this kernel is old
930 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
931 is not an RT signal, it can only be queued once. */
932 kill_lwp (pid, SIGSTOP);
933
934 /* Finally, resume the stopped process. This will deliver the SIGSTOP
935 (or a higher priority signal, just like normal PTRACE_ATTACH). */
936 ptrace (PTRACE_CONT, pid, 0, 0);
937 }
938
939 /* Make sure the initial process is stopped. The user-level threads
940 layer might want to poke around in the inferior, and that won't
941 work if things haven't stabilized yet. */
942 new_pid = my_waitpid (pid, &status, 0);
943 if (new_pid == -1 && errno == ECHILD)
944 {
945 if (first)
946 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
947
948 /* Try again with __WCLONE to check cloned processes. */
949 new_pid = my_waitpid (pid, &status, __WCLONE);
950 *cloned = 1;
951 }
952
dacc9cb2
PP
953 gdb_assert (pid == new_pid);
954
955 if (!WIFSTOPPED (status))
956 {
957 /* The pid we tried to attach has apparently just exited. */
958 if (debug_linux_nat)
959 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
960 pid, status_to_str (status));
961 return status;
962 }
a0ef4274
DJ
963
964 if (WSTOPSIG (status) != SIGSTOP)
965 {
966 *signalled = 1;
967 if (debug_linux_nat)
968 fprintf_unfiltered (gdb_stdlog,
969 "LNPAW: Received %s after attaching\n",
970 status_to_str (status));
971 }
972
973 return status;
974}
975
/* Attach to the LWP specified by PID.  Return 0 if successful, -1 if
   the new LWP could not be attached, or 1 if we're already auto
   attached to this thread, but haven't processed the
   PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
   its existence, without considering it an error.  */
d6b0e80f 981
9ee57c33 982int
93815fbf 983lin_lwp_attach_lwp (ptid_t ptid)
d6b0e80f 984{
9ee57c33 985 struct lwp_info *lp;
84636d28 986 int lwpid;
d6b0e80f 987
dfd4cc63 988 gdb_assert (ptid_lwp_p (ptid));
d6b0e80f 989
9ee57c33 990 lp = find_lwp_pid (ptid);
dfd4cc63 991 lwpid = ptid_get_lwp (ptid);
d6b0e80f
AC
992
993 /* We assume that we're already attached to any LWP that has an id
994 equal to the overall process id, and to any LWP that is already
995 in our list of LWPs. If we're not seeing exit events from threads
996 and we've had PID wraparound since we last tried to stop all threads,
997 this assumption might be wrong; fortunately, this is very unlikely
998 to happen. */
dfd4cc63 999 if (lwpid != ptid_get_pid (ptid) && lp == NULL)
d6b0e80f 1000 {
a0ef4274 1001 int status, cloned = 0, signalled = 0;
d6b0e80f 1002
84636d28 1003 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
9ee57c33 1004 {
96d7229d 1005 if (linux_supports_tracefork ())
84636d28
PA
1006 {
1007 /* If we haven't stopped all threads when we get here,
1008 we may have seen a thread listed in thread_db's list,
1009 but not processed the PTRACE_EVENT_CLONE yet. If
1010 that's the case, ignore this new thread, and let
1011 normal event handling discover it later. */
1012 if (in_pid_list_p (stopped_pids, lwpid))
1013 {
1014 /* We've already seen this thread stop, but we
1015 haven't seen the PTRACE_EVENT_CLONE extended
1016 event yet. */
84636d28
PA
1017 return 0;
1018 }
1019 else
1020 {
1021 int new_pid;
1022 int status;
1023
1024 /* See if we've got a stop for this new child
1025 pending. If so, we're already attached. */
1026 new_pid = my_waitpid (lwpid, &status, WNOHANG);
1027 if (new_pid == -1 && errno == ECHILD)
1028 new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
1029 if (new_pid != -1)
1030 {
1031 if (WIFSTOPPED (status))
1032 add_to_pid_list (&stopped_pids, lwpid, status);
84636d28
PA
1033 return 1;
1034 }
1035 }
1036 }
1037
9ee57c33
DJ
1038 /* If we fail to attach to the thread, issue a warning,
1039 but continue. One way this can happen is if thread
e9efe249 1040 creation is interrupted; as of Linux kernel 2.6.19, a
9ee57c33
DJ
1041 bug may place threads in the thread list and then fail
1042 to create them. */
1043 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1044 safe_strerror (errno));
1045 return -1;
1046 }
1047
d6b0e80f
AC
1048 if (debug_linux_nat)
1049 fprintf_unfiltered (gdb_stdlog,
1050 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1051 target_pid_to_str (ptid));
1052
a0ef4274 1053 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
dacc9cb2 1054 if (!WIFSTOPPED (status))
12696c10 1055 return 1;
dacc9cb2 1056
a0ef4274
DJ
1057 lp = add_lwp (ptid);
1058 lp->stopped = 1;
1059 lp->cloned = cloned;
1060 lp->signalled = signalled;
1061 if (WSTOPSIG (status) != SIGSTOP)
d6b0e80f 1062 {
a0ef4274
DJ
1063 lp->resumed = 1;
1064 lp->status = status;
d6b0e80f
AC
1065 }
1066
dfd4cc63 1067 target_post_attach (ptid_get_lwp (lp->ptid));
d6b0e80f
AC
1068
1069 if (debug_linux_nat)
1070 {
1071 fprintf_unfiltered (gdb_stdlog,
1072 "LLAL: waitpid %s received %s\n",
1073 target_pid_to_str (ptid),
1074 status_to_str (status));
1075 }
1076 }
1077 else
1078 {
1079 /* We assume that the LWP representing the original process is
1080 already stopped. Mark it as stopped in the data structure
155bd5d1
AC
1081 that the GNU/linux ptrace layer uses to keep track of
1082 threads. Note that this won't have already been done since
1083 the main thread will have, we assume, been stopped by an
1084 attach from a different layer. */
9ee57c33
DJ
1085 if (lp == NULL)
1086 lp = add_lwp (ptid);
d6b0e80f
AC
1087 lp->stopped = 1;
1088 }
9ee57c33 1089
25289eb2 1090 lp->last_resume_kind = resume_stop;
9ee57c33 1091 return 0;
d6b0e80f
AC
1092}
1093
b84876c2 1094static void
136d6dae
VP
1095linux_nat_create_inferior (struct target_ops *ops,
1096 char *exec_file, char *allargs, char **env,
b84876c2
PA
1097 int from_tty)
1098{
10568435
JK
1099#ifdef HAVE_PERSONALITY
1100 int personality_orig = 0, personality_set = 0;
1101#endif /* HAVE_PERSONALITY */
b84876c2
PA
1102
1103 /* The fork_child mechanism is synchronous and calls target_wait, so
1104 we have to mask the async mode. */
1105
10568435
JK
1106#ifdef HAVE_PERSONALITY
1107 if (disable_randomization)
1108 {
1109 errno = 0;
1110 personality_orig = personality (0xffffffff);
1111 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1112 {
1113 personality_set = 1;
1114 personality (personality_orig | ADDR_NO_RANDOMIZE);
1115 }
1116 if (errno != 0 || (personality_set
1117 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1118 warning (_("Error disabling address space randomization: %s"),
1119 safe_strerror (errno));
1120 }
1121#endif /* HAVE_PERSONALITY */
1122
2455069d 1123 /* Make sure we report all signals during startup. */
94bedb42 1124 linux_nat_pass_signals (ops, 0, NULL);
2455069d 1125
136d6dae 1126 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
b84876c2 1127
10568435
JK
1128#ifdef HAVE_PERSONALITY
1129 if (personality_set)
1130 {
1131 errno = 0;
1132 personality (personality_orig);
1133 if (errno != 0)
1134 warning (_("Error restoring address space randomization: %s"),
1135 safe_strerror (errno));
1136 }
1137#endif /* HAVE_PERSONALITY */
b84876c2
PA
1138}
1139
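/* Illustrative sketch of the personality-based handling used above:
   query the current personality, set ADDR_NO_RANDOMIZE before
   starting the child, and restore the old value afterwards.  Outline
   only ("#if 0"); the real logic is in linux_nat_create_inferior.  */
#if 0
static int
disable_aslr_sketch (void)
{
  int orig;

  errno = 0;
  orig = personality (0xffffffff);	/* Query without changing.  */
  if (errno == 0 && !(orig & ADDR_NO_RANDOMIZE))
    {
      personality (orig | ADDR_NO_RANDOMIZE);
      return orig;			/* Caller restores this later.  */
    }
  return -1;
}
#endif
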
d6b0e80f 1140static void
c0939df1 1141linux_nat_attach (struct target_ops *ops, const char *args, int from_tty)
d6b0e80f
AC
1142{
1143 struct lwp_info *lp;
d6b0e80f 1144 int status;
af990527 1145 ptid_t ptid;
87b0bb13 1146 volatile struct gdb_exception ex;
d6b0e80f 1147
2455069d 1148 /* Make sure we report all signals during attach. */
94bedb42 1149 linux_nat_pass_signals (ops, 0, NULL);
2455069d 1150
87b0bb13
JK
1151 TRY_CATCH (ex, RETURN_MASK_ERROR)
1152 {
1153 linux_ops->to_attach (ops, args, from_tty);
1154 }
1155 if (ex.reason < 0)
1156 {
1157 pid_t pid = parse_pid_to_attach (args);
1158 struct buffer buffer;
1159 char *message, *buffer_s;
1160
1161 message = xstrdup (ex.message);
1162 make_cleanup (xfree, message);
1163
1164 buffer_init (&buffer);
7ae1a6a6 1165 linux_ptrace_attach_fail_reason (pid, &buffer);
87b0bb13
JK
1166
1167 buffer_grow_str0 (&buffer, "");
1168 buffer_s = buffer_finish (&buffer);
1169 make_cleanup (xfree, buffer_s);
1170
7ae1a6a6
PA
1171 if (*buffer_s != '\0')
1172 throw_error (ex.error, "warning: %s\n%s", buffer_s, message);
1173 else
1174 throw_error (ex.error, "%s", message);
87b0bb13 1175 }
d6b0e80f 1176
af990527
PA
1177 /* The ptrace base target adds the main thread with (pid,0,0)
1178 format. Decorate it with lwp info. */
dfd4cc63
LM
1179 ptid = ptid_build (ptid_get_pid (inferior_ptid),
1180 ptid_get_pid (inferior_ptid),
1181 0);
af990527
PA
1182 thread_change_ptid (inferior_ptid, ptid);
1183
9f0bdab8 1184 /* Add the initial process as the first LWP to the list. */
26cb8b7c 1185 lp = add_initial_lwp (ptid);
a0ef4274
DJ
1186
1187 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1188 &lp->signalled);
dacc9cb2
PP
1189 if (!WIFSTOPPED (status))
1190 {
1191 if (WIFEXITED (status))
1192 {
1193 int exit_code = WEXITSTATUS (status);
1194
1195 target_terminal_ours ();
1196 target_mourn_inferior ();
1197 if (exit_code == 0)
1198 error (_("Unable to attach: program exited normally."));
1199 else
1200 error (_("Unable to attach: program exited with code %d."),
1201 exit_code);
1202 }
1203 else if (WIFSIGNALED (status))
1204 {
2ea28649 1205 enum gdb_signal signo;
dacc9cb2
PP
1206
1207 target_terminal_ours ();
1208 target_mourn_inferior ();
1209
2ea28649 1210 signo = gdb_signal_from_host (WTERMSIG (status));
dacc9cb2
PP
1211 error (_("Unable to attach: program terminated with signal "
1212 "%s, %s."),
2ea28649
PA
1213 gdb_signal_to_name (signo),
1214 gdb_signal_to_string (signo));
dacc9cb2
PP
1215 }
1216
1217 internal_error (__FILE__, __LINE__,
1218 _("unexpected status %d for PID %ld"),
dfd4cc63 1219 status, (long) ptid_get_lwp (ptid));
dacc9cb2
PP
1220 }
1221
a0ef4274 1222 lp->stopped = 1;
9f0bdab8 1223
a0ef4274 1224 /* Save the wait status to report later. */
d6b0e80f 1225 lp->resumed = 1;
a0ef4274
DJ
1226 if (debug_linux_nat)
1227 fprintf_unfiltered (gdb_stdlog,
1228 "LNA: waitpid %ld, saving status %s\n",
dfd4cc63 1229 (long) ptid_get_pid (lp->ptid), status_to_str (status));
710151dd 1230
7feb7d06
PA
1231 lp->status = status;
1232
1233 if (target_can_async_p ())
1234 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1235}
1236
a0ef4274
DJ
1237/* Get pending status of LP. */
1238static int
1239get_pending_status (struct lwp_info *lp, int *status)
1240{
a493e3e2 1241 enum gdb_signal signo = GDB_SIGNAL_0;
ca2163eb
PA
1242
1243 /* If we paused threads momentarily, we may have stored pending
1244 events in lp->status or lp->waitstatus (see stop_wait_callback),
1245 and GDB core hasn't seen any signal for those threads.
1246 Otherwise, the last signal reported to the core is found in the
1247 thread object's stop_signal.
1248
1249 There's a corner case that isn't handled here at present. Only
1250 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1251 stop_signal make sense as a real signal to pass to the inferior.
1252 Some catchpoint related events, like
1253 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
a493e3e2 1254 to GDB_SIGNAL_SIGTRAP when the catchpoint triggers. But,
ca2163eb
PA
1255 those traps are debug API (ptrace in our case) related and
1256 induced; the inferior wouldn't see them if it wasn't being
1257 traced. Hence, we should never pass them to the inferior, even
1258 when set to pass state. Since this corner case isn't handled by
1259 infrun.c when proceeding with a signal, for consistency, neither
1260 do we handle it here (or elsewhere in the file we check for
1261 signal pass state). Normally SIGTRAP isn't set to pass state, so
1262 this is really a corner case. */
1263
1264 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
a493e3e2 1265 signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
ca2163eb 1266 else if (lp->status)
2ea28649 1267 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
ca2163eb
PA
1268 else if (non_stop && !is_executing (lp->ptid))
1269 {
1270 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1271
16c381f0 1272 signo = tp->suspend.stop_signal;
ca2163eb
PA
1273 }
1274 else if (!non_stop)
a0ef4274 1275 {
ca2163eb
PA
1276 struct target_waitstatus last;
1277 ptid_t last_ptid;
4c28f408 1278
ca2163eb 1279 get_last_target_status (&last_ptid, &last);
4c28f408 1280
dfd4cc63 1281 if (ptid_get_lwp (lp->ptid) == ptid_get_lwp (last_ptid))
ca2163eb 1282 {
e09875d4 1283 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1284
16c381f0 1285 signo = tp->suspend.stop_signal;
4c28f408 1286 }
ca2163eb 1287 }
4c28f408 1288
ca2163eb 1289 *status = 0;
4c28f408 1290
a493e3e2 1291 if (signo == GDB_SIGNAL_0)
ca2163eb
PA
1292 {
1293 if (debug_linux_nat)
1294 fprintf_unfiltered (gdb_stdlog,
1295 "GPT: lwp %s has no pending signal\n",
1296 target_pid_to_str (lp->ptid));
1297 }
1298 else if (!signal_pass_state (signo))
1299 {
1300 if (debug_linux_nat)
3e43a32a
MS
1301 fprintf_unfiltered (gdb_stdlog,
1302 "GPT: lwp %s had signal %s, "
1303 "but it is in no pass state\n",
ca2163eb 1304 target_pid_to_str (lp->ptid),
2ea28649 1305 gdb_signal_to_string (signo));
a0ef4274 1306 }
a0ef4274 1307 else
4c28f408 1308 {
2ea28649 1309 *status = W_STOPCODE (gdb_signal_to_host (signo));
ca2163eb
PA
1310
1311 if (debug_linux_nat)
1312 fprintf_unfiltered (gdb_stdlog,
1313 "GPT: lwp %s has pending signal %s\n",
1314 target_pid_to_str (lp->ptid),
2ea28649 1315 gdb_signal_to_string (signo));
4c28f408 1316 }
a0ef4274
DJ
1317
1318 return 0;
1319}
1320
d6b0e80f
AC
1321static int
1322detach_callback (struct lwp_info *lp, void *data)
1323{
1324 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1325
1326 if (debug_linux_nat && lp->status)
1327 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1328 strsignal (WSTOPSIG (lp->status)),
1329 target_pid_to_str (lp->ptid));
1330
a0ef4274
DJ
1331 /* If there is a pending SIGSTOP, get rid of it. */
1332 if (lp->signalled)
d6b0e80f 1333 {
d6b0e80f
AC
1334 if (debug_linux_nat)
1335 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1336 "DC: Sending SIGCONT to %s\n",
1337 target_pid_to_str (lp->ptid));
d6b0e80f 1338
dfd4cc63 1339 kill_lwp (ptid_get_lwp (lp->ptid), SIGCONT);
d6b0e80f 1340 lp->signalled = 0;
d6b0e80f
AC
1341 }
1342
1343 /* We don't actually detach from the LWP that has an id equal to the
1344 overall process id just yet. */
dfd4cc63 1345 if (ptid_get_lwp (lp->ptid) != ptid_get_pid (lp->ptid))
d6b0e80f 1346 {
a0ef4274
DJ
1347 int status = 0;
1348
1349 /* Pass on any pending signal for this LWP. */
1350 get_pending_status (lp, &status);
1351
7b50312a
PA
1352 if (linux_nat_prepare_to_resume != NULL)
1353 linux_nat_prepare_to_resume (lp);
d6b0e80f 1354 errno = 0;
dfd4cc63 1355 if (ptrace (PTRACE_DETACH, ptid_get_lwp (lp->ptid), 0,
a0ef4274 1356 WSTOPSIG (status)) < 0)
8a3fe4f8 1357 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1358 safe_strerror (errno));
1359
1360 if (debug_linux_nat)
1361 fprintf_unfiltered (gdb_stdlog,
1362 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1363 target_pid_to_str (lp->ptid),
7feb7d06 1364 strsignal (WSTOPSIG (status)));
d6b0e80f
AC
1365
1366 delete_lwp (lp->ptid);
1367 }
1368
1369 return 0;
1370}
1371
1372static void
52554a0e 1373linux_nat_detach (struct target_ops *ops, const char *args, int from_tty)
d6b0e80f 1374{
b84876c2 1375 int pid;
a0ef4274 1376 int status;
d90e17a7
PA
1377 struct lwp_info *main_lwp;
1378
dfd4cc63 1379 pid = ptid_get_pid (inferior_ptid);
a0ef4274 1380
ae5e0686
MK
1381 /* Don't unregister from the event loop, as there may be other
1382 inferiors running. */
b84876c2 1383
4c28f408
PA
  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to successfully detach.  */
d90e17a7 1386 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
4c28f408
PA
1387 /* ... and wait until all of them have reported back that
1388 they're no longer running. */
d90e17a7 1389 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
4c28f408 1390
d90e17a7 1391 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
d6b0e80f
AC
1392
1393 /* Only the initial process should be left right now. */
dfd4cc63 1394 gdb_assert (num_lwps (ptid_get_pid (inferior_ptid)) == 1);
d90e17a7
PA
1395
1396 main_lwp = find_lwp_pid (pid_to_ptid (pid));
d6b0e80f 1397
a0ef4274
DJ
1398 /* Pass on any pending signal for the last LWP. */
1399 if ((args == NULL || *args == '\0')
d90e17a7 1400 && get_pending_status (main_lwp, &status) != -1
a0ef4274
DJ
1401 && WIFSTOPPED (status))
1402 {
52554a0e
TT
1403 char *tem;
1404
a0ef4274
DJ
1405 /* Put the signal number in ARGS so that inf_ptrace_detach will
1406 pass it along with PTRACE_DETACH. */
52554a0e 1407 tem = alloca (8);
cde33bf1 1408 xsnprintf (tem, 8, "%d", (int) WSTOPSIG (status));
52554a0e 1409 args = tem;
ddabfc73
TT
1410 if (debug_linux_nat)
1411 fprintf_unfiltered (gdb_stdlog,
1412 "LND: Sending signal %s to %s\n",
1413 args,
1414 target_pid_to_str (main_lwp->ptid));
a0ef4274
DJ
1415 }
1416
7b50312a
PA
1417 if (linux_nat_prepare_to_resume != NULL)
1418 linux_nat_prepare_to_resume (main_lwp);
d90e17a7 1419 delete_lwp (main_lwp->ptid);
b84876c2 1420
7a7d3353
PA
1421 if (forks_exist_p ())
1422 {
1423 /* Multi-fork case. The current inferior_ptid is being detached
1424 from, but there are other viable forks to debug. Detach from
1425 the current fork, and context-switch to the first
1426 available. */
1427 linux_fork_detach (args, from_tty);
7a7d3353
PA
1428 }
1429 else
1430 linux_ops->to_detach (ops, args, from_tty);
d6b0e80f
AC
1431}
1432
1433/* Resume LP. */
1434
25289eb2 1435static void
e5ef252a 1436resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
d6b0e80f 1437{
25289eb2 1438 if (lp->stopped)
6c95b8df 1439 {
c9657e70 1440 struct inferior *inf = find_inferior_ptid (lp->ptid);
25289eb2
PA
1441
1442 if (inf->vfork_child != NULL)
1443 {
1444 if (debug_linux_nat)
1445 fprintf_unfiltered (gdb_stdlog,
1446 "RC: Not resuming %s (vfork parent)\n",
1447 target_pid_to_str (lp->ptid));
1448 }
1449 else if (lp->status == 0
1450 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
1451 {
1452 if (debug_linux_nat)
1453 fprintf_unfiltered (gdb_stdlog,
e5ef252a
PA
1454 "RC: Resuming sibling %s, %s, %s\n",
1455 target_pid_to_str (lp->ptid),
1456 (signo != GDB_SIGNAL_0
1457 ? strsignal (gdb_signal_to_host (signo))
1458 : "0"),
1459 step ? "step" : "resume");
25289eb2 1460
7b50312a
PA
1461 if (linux_nat_prepare_to_resume != NULL)
1462 linux_nat_prepare_to_resume (lp);
25289eb2 1463 linux_ops->to_resume (linux_ops,
dfd4cc63 1464 pid_to_ptid (ptid_get_lwp (lp->ptid)),
e5ef252a 1465 step, signo);
25289eb2
PA
1466 lp->stopped = 0;
1467 lp->step = step;
25289eb2
PA
1468 lp->stopped_by_watchpoint = 0;
1469 }
1470 else
1471 {
1472 if (debug_linux_nat)
1473 fprintf_unfiltered (gdb_stdlog,
1474 "RC: Not resuming sibling %s (has pending)\n",
1475 target_pid_to_str (lp->ptid));
1476 }
6c95b8df 1477 }
25289eb2 1478 else
d6b0e80f 1479 {
d90e17a7
PA
1480 if (debug_linux_nat)
1481 fprintf_unfiltered (gdb_stdlog,
25289eb2 1482 "RC: Not resuming sibling %s (not stopped)\n",
d6b0e80f 1483 target_pid_to_str (lp->ptid));
d6b0e80f 1484 }
25289eb2 1485}
d6b0e80f 1486
8817a6f2
PA
1487/* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1488 Resume LWP with the last stop signal, if it is in pass state. */
e5ef252a 1489
25289eb2 1490static int
8817a6f2 1491linux_nat_resume_callback (struct lwp_info *lp, void *except)
25289eb2 1492{
e5ef252a
PA
1493 enum gdb_signal signo = GDB_SIGNAL_0;
1494
8817a6f2
PA
1495 if (lp == except)
1496 return 0;
1497
e5ef252a
PA
1498 if (lp->stopped)
1499 {
1500 struct thread_info *thread;
1501
1502 thread = find_thread_ptid (lp->ptid);
1503 if (thread != NULL)
1504 {
70509625 1505 signo = thread->suspend.stop_signal;
e5ef252a
PA
1506 thread->suspend.stop_signal = GDB_SIGNAL_0;
1507 }
1508 }
1509
1510 resume_lwp (lp, 0, signo);
d6b0e80f
AC
1511 return 0;
1512}
1513
1514static int
1515resume_clear_callback (struct lwp_info *lp, void *data)
1516{
1517 lp->resumed = 0;
25289eb2 1518 lp->last_resume_kind = resume_stop;
d6b0e80f
AC
1519 return 0;
1520}
1521
1522static int
1523resume_set_callback (struct lwp_info *lp, void *data)
1524{
1525 lp->resumed = 1;
25289eb2 1526 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1527 return 0;
1528}
1529
1530static void
28439f5e 1531linux_nat_resume (struct target_ops *ops,
2ea28649 1532 ptid_t ptid, int step, enum gdb_signal signo)
d6b0e80f
AC
1533{
1534 struct lwp_info *lp;
d90e17a7 1535 int resume_many;
d6b0e80f 1536
76f50ad1
DJ
1537 if (debug_linux_nat)
1538 fprintf_unfiltered (gdb_stdlog,
1539 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1540 step ? "step" : "resume",
1541 target_pid_to_str (ptid),
a493e3e2 1542 (signo != GDB_SIGNAL_0
2ea28649 1543 ? strsignal (gdb_signal_to_host (signo)) : "0"),
76f50ad1
DJ
1544 target_pid_to_str (inferior_ptid));
1545
d6b0e80f 1546 /* A specific PTID means `step only this process id'. */
d90e17a7
PA
1547 resume_many = (ptid_equal (minus_one_ptid, ptid)
1548 || ptid_is_pid (ptid));
4c28f408 1549
e3e9f5a2
PA
1550 /* Mark the lwps we're resuming as resumed. */
1551 iterate_over_lwps (ptid, resume_set_callback, NULL);
d6b0e80f 1552
d90e17a7
PA
1553 /* See if it's the current inferior that should be handled
1554 specially. */
1555 if (resume_many)
1556 lp = find_lwp_pid (inferior_ptid);
1557 else
1558 lp = find_lwp_pid (ptid);
9f0bdab8 1559 gdb_assert (lp != NULL);
d6b0e80f 1560
9f0bdab8
DJ
1561 /* Remember if we're stepping. */
1562 lp->step = step;
25289eb2 1563 lp->last_resume_kind = step ? resume_step : resume_continue;
d6b0e80f 1564
9f0bdab8
DJ
1565 /* If we have a pending wait status for this thread, there is no
1566 point in resuming the process. But first make sure that
1567 linux_nat_wait won't preemptively handle the event - we
1568 should never take this short-circuit if we are going to
1569 leave LP running, since we have skipped resuming all the
1570 other threads. This bit of code needs to be synchronized
1571 with linux_nat_wait. */
76f50ad1 1572
9f0bdab8
DJ
1573 if (lp->status && WIFSTOPPED (lp->status))
1574 {
2455069d
UW
1575 if (!lp->step
1576 && WSTOPSIG (lp->status)
1577 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
d6b0e80f 1578 {
9f0bdab8
DJ
1579 if (debug_linux_nat)
1580 fprintf_unfiltered (gdb_stdlog,
1581 "LLR: Not short circuiting for ignored "
1582 "status 0x%x\n", lp->status);
1583
d6b0e80f
AC
1584 /* FIXME: What should we do if we are supposed to continue
1585 this thread with a signal? */
a493e3e2 1586 gdb_assert (signo == GDB_SIGNAL_0);
2ea28649 1587 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
1588 lp->status = 0;
1589 }
1590 }
76f50ad1 1591
6c95b8df 1592 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
9f0bdab8
DJ
1593 {
1594 /* FIXME: What should we do if we are supposed to continue
1595 this thread with a signal? */
a493e3e2 1596 gdb_assert (signo == GDB_SIGNAL_0);
76f50ad1 1597
9f0bdab8
DJ
1598 if (debug_linux_nat)
1599 fprintf_unfiltered (gdb_stdlog,
1600 "LLR: Short circuiting for status 0x%x\n",
1601 lp->status);
d6b0e80f 1602
7feb7d06
PA
1603 if (target_can_async_p ())
1604 {
1605 target_async (inferior_event_handler, 0);
1606 /* Tell the event loop we have something to process. */
1607 async_file_mark ();
1608 }
9f0bdab8 1609 return;
d6b0e80f
AC
1610 }
1611
d90e17a7 1612 if (resume_many)
8817a6f2 1613 iterate_over_lwps (ptid, linux_nat_resume_callback, lp);
d90e17a7
PA
1614
1615 /* Convert to something the lower layer understands. */
dfd4cc63 1616 ptid = pid_to_ptid (ptid_get_lwp (lp->ptid));
d6b0e80f 1617
7b50312a
PA
1618 if (linux_nat_prepare_to_resume != NULL)
1619 linux_nat_prepare_to_resume (lp);
28439f5e 1620 linux_ops->to_resume (linux_ops, ptid, step, signo);
ebec9a0f 1621 lp->stopped_by_watchpoint = 0;
8817a6f2 1622 lp->stopped = 0;
9f0bdab8 1623
d6b0e80f
AC
1624 if (debug_linux_nat)
1625 fprintf_unfiltered (gdb_stdlog,
1626 "LLR: %s %s, %s (resume event thread)\n",
1627 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1628 target_pid_to_str (ptid),
a493e3e2 1629 (signo != GDB_SIGNAL_0
2ea28649 1630 ? strsignal (gdb_signal_to_host (signo)) : "0"));
b84876c2
PA
1631
1632 if (target_can_async_p ())
8ea051c5 1633 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1634}
1635
c5f62d5f 1636/* Send a signal to an LWP. */
d6b0e80f
AC
1637
1638static int
1639kill_lwp (int lwpid, int signo)
1640{
c5f62d5f
DE
1641 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1642 fails, then we are not using nptl threads and we should be using kill. */
d6b0e80f
AC
1643
1644#ifdef HAVE_TKILL_SYSCALL
c5f62d5f
DE
1645 {
1646 static int tkill_failed;
1647
1648 if (!tkill_failed)
1649 {
1650 int ret;
1651
1652 errno = 0;
1653 ret = syscall (__NR_tkill, lwpid, signo);
1654 if (errno != ENOSYS)
1655 return ret;
1656 tkill_failed = 1;
1657 }
1658 }
d6b0e80f
AC
1659#endif
1660
1661 return kill (lwpid, signo);
1662}
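/* Illustrative aside, not part of this file: the distinction that
   matters above is that kill(2) addresses a whole thread group while
   tkill(2) addresses one specific LWP.  A minimal sketch of a
   thread-directed SIGSTOP with the same ENOSYS fallback, assuming a
   Linux host that defines SYS_tkill:  */

#include <errno.h>
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int
send_sigstop_to_lwp (pid_t lwpid)
{
  errno = 0;
  if (syscall (SYS_tkill, lwpid, SIGSTOP) == 0)
    return 0;
  if (errno != ENOSYS)
    return -1;
  /* Old kernel without tkill: fall back to a process-wide kill.  */
  return kill (lwpid, SIGSTOP);
}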
1663
ca2163eb
PA
1664/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1665 event, check if the core is interested in it: if not, ignore the
1666 event, and keep waiting; otherwise, we need to toggle the LWP's
1667 syscall entry/exit status, since the ptrace event itself doesn't
1668 indicate it, and report the trap to higher layers. */
1669
1670static int
1671linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1672{
1673 struct target_waitstatus *ourstatus = &lp->waitstatus;
1674 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
1675 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
1676
1677 if (stopping)
1678 {
1679 /* If we're stopping threads, there's a SIGSTOP pending, which
1680 makes it so that the LWP reports an immediate syscall return,
1681 followed by the SIGSTOP. Skip seeing that "return" using
1682 PTRACE_CONT directly, and let stop_wait_callback collect the
1683 SIGSTOP. Later when the thread is resumed, a new syscall
1684 entry event is reported. If we didn't do this (and returned 0), we'd
1685 leave a syscall entry pending, and our caller, by using
1686 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1687 itself. Later, when the user re-resumes this LWP, we'd see
1688 another syscall entry event and we'd mistake it for a return.
1689
1690 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1691 (leaving immediately with LWP->signalled set, without issuing
1692 a PTRACE_CONT), it would still be problematic to leave this
1693 syscall enter pending, as later when the thread is resumed,
1694 it would then see the same syscall exit mentioned above,
1695 followed by the delayed SIGSTOP, while the syscall didn't
1696 actually get to execute. It seems it would be even more
1697 confusing to the user. */
1698
1699 if (debug_linux_nat)
1700 fprintf_unfiltered (gdb_stdlog,
1701 "LHST: ignoring syscall %d "
1702 "for LWP %ld (stopping threads), "
1703 "resuming with PTRACE_CONT for SIGSTOP\n",
1704 syscall_number,
dfd4cc63 1705 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1706
1707 lp->syscall_state = TARGET_WAITKIND_IGNORE;
dfd4cc63 1708 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
8817a6f2 1709 lp->stopped = 0;
ca2163eb
PA
1710 return 1;
1711 }
1712
1713 if (catch_syscall_enabled ())
1714 {
1715 /* Always update the entry/return state, even if this particular
1716 syscall isn't interesting to the core now. In async mode,
1717 the user could install a new catchpoint for this syscall
1718 between syscall enter/return, and we'll need to know to
1719 report a syscall return if that happens. */
1720 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1721 ? TARGET_WAITKIND_SYSCALL_RETURN
1722 : TARGET_WAITKIND_SYSCALL_ENTRY);
1723
1724 if (catching_syscall_number (syscall_number))
1725 {
1726 /* Alright, an event to report. */
1727 ourstatus->kind = lp->syscall_state;
1728 ourstatus->value.syscall_number = syscall_number;
1729
1730 if (debug_linux_nat)
1731 fprintf_unfiltered (gdb_stdlog,
1732 "LHST: stopping for %s of syscall %d"
1733 " for LWP %ld\n",
3e43a32a
MS
1734 lp->syscall_state
1735 == TARGET_WAITKIND_SYSCALL_ENTRY
ca2163eb
PA
1736 ? "entry" : "return",
1737 syscall_number,
dfd4cc63 1738 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1739 return 0;
1740 }
1741
1742 if (debug_linux_nat)
1743 fprintf_unfiltered (gdb_stdlog,
1744 "LHST: ignoring %s of syscall %d "
1745 "for LWP %ld\n",
1746 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1747 ? "entry" : "return",
1748 syscall_number,
dfd4cc63 1749 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1750 }
1751 else
1752 {
1753 /* If we had been syscall tracing, and hence used PT_SYSCALL
1754 before on this LWP, it could happen that the user removes all
1755 syscall catchpoints before we get to process this event.
1756 There are two noteworthy issues here:
1757
1758 - When stopped at a syscall entry event, resuming with
1759 PT_STEP still resumes executing the syscall and reports a
1760 syscall return.
1761
1762 - Only PT_SYSCALL catches syscall enters. So if we last
1763 single-stepped this thread, then this event can't be a
1764 syscall enter; it has to be the corresponding
1765 syscall exit.
1766
1767 The points above mean that the next resume, be it PT_STEP or
1768 PT_CONTINUE, can not trigger a syscall trace event. */
1769 if (debug_linux_nat)
1770 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
1771 "LHST: caught syscall event "
1772 "with no syscall catchpoints."
ca2163eb
PA
1773 " %d for LWP %ld, ignoring\n",
1774 syscall_number,
dfd4cc63 1775 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1776 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1777 }
1778
1779 /* The core isn't interested in this event. For efficiency, avoid
1780 stopping all threads only to have the core resume them all again.
1781 Since we're not stopping threads, if we're still syscall tracing
1782 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1783 subsequent syscall. Simply resume using the inf-ptrace layer,
1784 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1785
1786 /* Note that gdbarch_get_syscall_number may access registers, hence
1787 fill a regcache. */
1788 registers_changed ();
7b50312a
PA
1789 if (linux_nat_prepare_to_resume != NULL)
1790 linux_nat_prepare_to_resume (lp);
dfd4cc63 1791 linux_ops->to_resume (linux_ops, pid_to_ptid (ptid_get_lwp (lp->ptid)),
a493e3e2 1792 lp->step, GDB_SIGNAL_0);
8817a6f2 1793 lp->stopped = 0;
ca2163eb
PA
1794 return 1;
1795}
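/* Illustrative aside, not part of this file: a sketch of the
   ptrace-level convention this function builds on.  With
   PTRACE_O_TRACESYSGOOD set, a syscall stop is reported as
   (SIGTRAP | 0x80), and the tracer itself must remember whether a
   given stop is an entry or an exit -- the kernel does not say,
   which is why the code above toggles lp->syscall_state.  CHILD is
   assumed to be an already-attached, stopped tracee.  */

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void
trace_syscalls (pid_t child)
{
  int status, in_syscall = 0;

  ptrace (PTRACE_SETOPTIONS, child, 0, PTRACE_O_TRACESYSGOOD);
  for (;;)
    {
      ptrace (PTRACE_SYSCALL, child, 0, 0);
      if (waitpid (child, &status, 0) == -1 || WIFEXITED (status))
        break;
      if (WIFSTOPPED (status) && WSTOPSIG (status) == (SIGTRAP | 0x80))
        {
          in_syscall = !in_syscall;
          printf ("syscall %s\n", in_syscall ? "entry" : "exit");
        }
    }
}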
1796
3d799a95
DJ
1797/* Handle a GNU/Linux extended wait response. If we see a clone
1798 event, we need to add the new LWP to our list (and not report the
1799 trap to higher layers). This function returns non-zero if the
1800 event should be ignored and we should wait again. If STOPPING is
1801 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1802
1803static int
3d799a95
DJ
1804linux_handle_extended_wait (struct lwp_info *lp, int status,
1805 int stopping)
d6b0e80f 1806{
dfd4cc63 1807 int pid = ptid_get_lwp (lp->ptid);
3d799a95 1808 struct target_waitstatus *ourstatus = &lp->waitstatus;
89a5711c 1809 int event = linux_ptrace_get_extended_event (status);
d6b0e80f 1810
3d799a95
DJ
1811 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1812 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1813 {
3d799a95
DJ
1814 unsigned long new_pid;
1815 int ret;
1816
1817 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1818
3d799a95
DJ
1819 /* If we haven't already seen the new PID stop, wait for it now. */
1820 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1821 {
1822 /* The new child has a pending SIGSTOP. We can't affect it until it
1823 hits the SIGSTOP, but we're already attached. */
1824 ret = my_waitpid (new_pid, &status,
1825 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1826 if (ret == -1)
1827 perror_with_name (_("waiting for new child"));
1828 else if (ret != new_pid)
1829 internal_error (__FILE__, __LINE__,
1830 _("wait returned unexpected PID %d"), ret);
1831 else if (!WIFSTOPPED (status))
1832 internal_error (__FILE__, __LINE__,
1833 _("wait returned unexpected status 0x%x"), status);
1834 }
1835
3a3e9ee3 1836 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95 1837
26cb8b7c
PA
1838 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1839 {
1840 /* The arch-specific native code may need to know about new
1841 forks even if those end up never mapped to an
1842 inferior. */
1843 if (linux_nat_new_fork != NULL)
1844 linux_nat_new_fork (lp, new_pid);
1845 }
1846
2277426b 1847 if (event == PTRACE_EVENT_FORK
dfd4cc63 1848 && linux_fork_checkpointing_p (ptid_get_pid (lp->ptid)))
2277426b 1849 {
2277426b
PA
1850 /* Handle checkpointing by linux-fork.c here as a special
1851 case. We don't want the follow-fork-mode or 'catch fork'
1852 to interfere with this. */
1853
1854 /* This won't actually modify the breakpoint list, but will
1855 physically remove the breakpoints from the child. */
d80ee84f 1856 detach_breakpoints (ptid_build (new_pid, new_pid, 0));
2277426b
PA
1857
1858 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
1859 if (!find_fork_pid (new_pid))
1860 add_fork (new_pid);
2277426b
PA
1861
1862 /* Report as spurious, so that infrun doesn't want to follow
1863 this fork. We're actually doing an infcall in
1864 linux-fork.c. */
1865 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2277426b
PA
1866
1867 /* Report the stop to the core. */
1868 return 0;
1869 }
1870
3d799a95
DJ
1871 if (event == PTRACE_EVENT_FORK)
1872 ourstatus->kind = TARGET_WAITKIND_FORKED;
1873 else if (event == PTRACE_EVENT_VFORK)
1874 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 1875 else
3d799a95 1876 {
78768c4a
JK
1877 struct lwp_info *new_lp;
1878
3d799a95 1879 ourstatus->kind = TARGET_WAITKIND_IGNORE;
78768c4a 1880
3c4d7e12
PA
1881 if (debug_linux_nat)
1882 fprintf_unfiltered (gdb_stdlog,
1883 "LHEW: Got clone event "
1884 "from LWP %d, new child is LWP %ld\n",
1885 pid, new_pid);
1886
dfd4cc63 1887 new_lp = add_lwp (ptid_build (ptid_get_pid (lp->ptid), new_pid, 0));
3d799a95 1888 new_lp->cloned = 1;
4c28f408 1889 new_lp->stopped = 1;
d6b0e80f 1890
3d799a95
DJ
1891 if (WSTOPSIG (status) != SIGSTOP)
1892 {
1893 /* This can happen if someone starts sending signals to
1894 the new thread before it gets a chance to run, which
1895 have a lower number than SIGSTOP (e.g. SIGUSR1).
1896 This is an unlikely case, and harder to handle for
1897 fork / vfork than for clone, so we do not try - but
1898 we handle it for clone events here. We'll send
1899 the other signal on to the thread below. */
1900
1901 new_lp->signalled = 1;
1902 }
1903 else
79395f92
PA
1904 {
1905 struct thread_info *tp;
1906
1907 /* When we stop for an event in some other thread, and
1908 pull the thread list just as this thread has cloned,
1909 we'll have seen the new thread in the thread_db list
1910 before handling the CLONE event (glibc's
1911 pthread_create adds the new thread to the thread list
1912 before clone'ing, and has the kernel fill in the
1913 thread's tid on the clone call with
1914 CLONE_PARENT_SETTID). If that happened, and the core
1915 had requested the new thread to stop, we'll have
1916 killed it with SIGSTOP. But since SIGSTOP is not an
1917 RT signal, it can only be queued once. We need to be
1918 careful to not resume the LWP if we wanted it to
1919 stop. In that case, we'll leave the SIGSTOP pending.
a493e3e2 1920 It will later be reported as GDB_SIGNAL_0. */
79395f92
PA
1921 tp = find_thread_ptid (new_lp->ptid);
1922 if (tp != NULL && tp->stop_requested)
1923 new_lp->last_resume_kind = resume_stop;
1924 else
1925 status = 0;
1926 }
d6b0e80f 1927
4c28f408 1928 if (non_stop)
3d799a95 1929 {
4c28f408
PA
1930 /* Add the new thread to GDB's lists as soon as possible
1931 so that:
1932
1933 1) the frontend doesn't have to wait for a stop to
1934 display them, and,
1935
1936 2) we tag it with the correct running state. */
1937
1938 /* If the thread_db layer is active, let it know about
1939 this new thread, and add it to GDB's list. */
1940 if (!thread_db_attach_lwp (new_lp->ptid))
1941 {
1942 /* We're not using thread_db. Add it to GDB's
1943 list. */
dfd4cc63 1944 target_post_attach (ptid_get_lwp (new_lp->ptid));
4c28f408
PA
1945 add_thread (new_lp->ptid);
1946 }
1947
1948 if (!stopping)
1949 {
1950 set_running (new_lp->ptid, 1);
1951 set_executing (new_lp->ptid, 1);
e21ffe51
PA
1952 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
1953 resume_stop. */
1954 new_lp->last_resume_kind = resume_continue;
4c28f408
PA
1955 }
1956 }
1957
79395f92
PA
1958 if (status != 0)
1959 {
1960 /* We created NEW_LP so it cannot yet contain STATUS. */
1961 gdb_assert (new_lp->status == 0);
1962
1963 /* Save the wait status to report later. */
1964 if (debug_linux_nat)
1965 fprintf_unfiltered (gdb_stdlog,
1966 "LHEW: waitpid of new LWP %ld, "
1967 "saving status %s\n",
dfd4cc63 1968 (long) ptid_get_lwp (new_lp->ptid),
79395f92
PA
1969 status_to_str (status));
1970 new_lp->status = status;
1971 }
1972
ca2163eb
PA
1973 /* Note the need to use the low target ops to resume, to
1974 handle resuming with PT_SYSCALL if we have syscall
1975 catchpoints. */
4c28f408
PA
1976 if (!stopping)
1977 {
3d799a95 1978 new_lp->resumed = 1;
ca2163eb 1979
79395f92 1980 if (status == 0)
ad34eb2f 1981 {
e21ffe51 1982 gdb_assert (new_lp->last_resume_kind == resume_continue);
ad34eb2f
JK
1983 if (debug_linux_nat)
1984 fprintf_unfiltered (gdb_stdlog,
79395f92 1985 "LHEW: resuming new LWP %ld\n",
dfd4cc63 1986 ptid_get_lwp (new_lp->ptid));
7b50312a
PA
1987 if (linux_nat_prepare_to_resume != NULL)
1988 linux_nat_prepare_to_resume (new_lp);
79395f92 1989 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
a493e3e2 1990 0, GDB_SIGNAL_0);
79395f92 1991 new_lp->stopped = 0;
ad34eb2f
JK
1992 }
1993 }
d6b0e80f 1994
3d799a95
DJ
1995 if (debug_linux_nat)
1996 fprintf_unfiltered (gdb_stdlog,
3c4d7e12 1997 "LHEW: resuming parent LWP %d\n", pid);
7b50312a
PA
1998 if (linux_nat_prepare_to_resume != NULL)
1999 linux_nat_prepare_to_resume (lp);
dfd4cc63
LM
2000 linux_ops->to_resume (linux_ops,
2001 pid_to_ptid (ptid_get_lwp (lp->ptid)),
a493e3e2 2002 0, GDB_SIGNAL_0);
8817a6f2 2003 lp->stopped = 0;
3d799a95
DJ
2004 return 1;
2005 }
2006
2007 return 0;
d6b0e80f
AC
2008 }
2009
3d799a95
DJ
2010 if (event == PTRACE_EVENT_EXEC)
2011 {
a75724bc
PA
2012 if (debug_linux_nat)
2013 fprintf_unfiltered (gdb_stdlog,
2014 "LHEW: Got exec event from LWP %ld\n",
dfd4cc63 2015 ptid_get_lwp (lp->ptid));
a75724bc 2016
3d799a95
DJ
2017 ourstatus->kind = TARGET_WAITKIND_EXECD;
2018 ourstatus->value.execd_pathname
8dd27370 2019 = xstrdup (linux_child_pid_to_exec_file (NULL, pid));
3d799a95 2020
6c95b8df
PA
2021 return 0;
2022 }
2023
2024 if (event == PTRACE_EVENT_VFORK_DONE)
2025 {
2026 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 2027 {
6c95b8df 2028 if (debug_linux_nat)
3e43a32a
MS
2029 fprintf_unfiltered (gdb_stdlog,
2030 "LHEW: Got expected PTRACE_EVENT_"
2031 "VFORK_DONE from LWP %ld: stopping\n",
dfd4cc63 2032 ptid_get_lwp (lp->ptid));
3d799a95 2033
6c95b8df
PA
2034 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2035 return 0;
3d799a95
DJ
2036 }
2037
6c95b8df 2038 if (debug_linux_nat)
3e43a32a
MS
2039 fprintf_unfiltered (gdb_stdlog,
2040 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2041 "from LWP %ld: resuming\n",
dfd4cc63
LM
2042 ptid_get_lwp (lp->ptid));
2043 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
6c95b8df 2044 return 1;
3d799a95
DJ
2045 }
2046
2047 internal_error (__FILE__, __LINE__,
2048 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2049}
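/* Illustrative aside, not part of this file: the extended events
   handled above are only delivered if the corresponding
   PTRACE_O_TRACE* options were set with PTRACE_SETOPTIONS; the event
   code travels in bits 16-23 of the wait status, and the new thread
   or child id is fetched with PTRACE_GETEVENTMSG.  A minimal sketch
   of reading a clone event, assuming PID is an attached tracee:  */

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void
report_clone_event (pid_t pid, int status)
{
  /* The ptrace event code, if any, lives in bits 16-23 of STATUS.  */
  int event = (status >> 16) & 0xff;

  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && event == PTRACE_EVENT_CLONE)
    {
      unsigned long new_lwp;

      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_lwp);
      printf ("LWP %d cloned new LWP %lu\n", (int) pid, new_lwp);
    }
}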
2050
2051/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2052 exited. */
2053
2054static int
2055wait_lwp (struct lwp_info *lp)
2056{
2057 pid_t pid;
432b4d03 2058 int status = 0;
d6b0e80f 2059 int thread_dead = 0;
432b4d03 2060 sigset_t prev_mask;
d6b0e80f
AC
2061
2062 gdb_assert (!lp->stopped);
2063 gdb_assert (lp->status == 0);
2064
432b4d03
JK
2065 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2066 block_child_signals (&prev_mask);
2067
2068 for (;;)
d6b0e80f 2069 {
432b4d03
JK
2070 /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2071 was right and we should just call sigsuspend. */
2072
dfd4cc63 2073 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, WNOHANG);
d6b0e80f 2074 if (pid == -1 && errno == ECHILD)
dfd4cc63 2075 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, __WCLONE | WNOHANG);
a9f4bb21
PA
2076 if (pid == -1 && errno == ECHILD)
2077 {
2078 /* The thread has previously exited. We need to delete it
2079 now because, for some vendor 2.4 kernels with NPTL
2080 support backported, there won't be an exit event unless
2081 it is the main thread. 2.6 kernels will report an exit
2082 event for each thread that exits, as expected. */
2083 thread_dead = 1;
2084 if (debug_linux_nat)
2085 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2086 target_pid_to_str (lp->ptid));
2087 }
432b4d03
JK
2088 if (pid != 0)
2089 break;
2090
2091 /* Bugs 10970, 12702.
2092 Thread group leader may have exited in which case we'll lock up in
2093 waitpid if there are other threads, even if they are all zombies too.
2094 Basically, we're not supposed to use waitpid this way.
2095 __WCLONE is not applicable for the leader so we can't use that.
2096 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2097 process; it gets ESRCH both for the zombie and for running processes.
2098
2099 As a workaround, check if we're waiting for the thread group leader and
2100 if it's a zombie, and avoid calling waitpid if it is.
2101
2102 This is racy, what if the tgl becomes a zombie right after we check?
2103 Therefore always use WNOHANG with sigsuspend - it is equivalent to
5f572dec 2104 a blocking waitpid, but linux_proc_pid_is_zombie is safe this way. */
432b4d03 2105
dfd4cc63
LM
2106 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid)
2107 && linux_proc_pid_is_zombie (ptid_get_lwp (lp->ptid)))
d6b0e80f 2108 {
d6b0e80f
AC
2109 thread_dead = 1;
2110 if (debug_linux_nat)
432b4d03
JK
2111 fprintf_unfiltered (gdb_stdlog,
2112 "WL: Thread group leader %s vanished.\n",
d6b0e80f 2113 target_pid_to_str (lp->ptid));
432b4d03 2114 break;
d6b0e80f 2115 }
432b4d03
JK
2116
2117 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
2118 get invoked despite our caller had them intentionally blocked by
2119 block_child_signals. This is sensitive only to the loop of
2120 linux_nat_wait_1 and there if we get called my_waitpid gets called
2121 again before it gets to sigsuspend so we can safely let the handlers
2122 get executed here. */
2123
d36bf488
DE
2124 if (debug_linux_nat)
2125 fprintf_unfiltered (gdb_stdlog, "WL: about to sigsuspend\n");
432b4d03
JK
2126 sigsuspend (&suspend_mask);
2127 }
2128
2129 restore_child_signals_mask (&prev_mask);
2130
d6b0e80f
AC
2131 if (!thread_dead)
2132 {
dfd4cc63 2133 gdb_assert (pid == ptid_get_lwp (lp->ptid));
d6b0e80f
AC
2134
2135 if (debug_linux_nat)
2136 {
2137 fprintf_unfiltered (gdb_stdlog,
2138 "WL: waitpid %s received %s\n",
2139 target_pid_to_str (lp->ptid),
2140 status_to_str (status));
2141 }
d6b0e80f 2142
a9f4bb21
PA
2143 /* Check if the thread has exited. */
2144 if (WIFEXITED (status) || WIFSIGNALED (status))
2145 {
2146 thread_dead = 1;
2147 if (debug_linux_nat)
2148 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2149 target_pid_to_str (lp->ptid));
2150 }
d6b0e80f
AC
2151 }
2152
2153 if (thread_dead)
2154 {
e26af52f 2155 exit_lwp (lp);
d6b0e80f
AC
2156 return 0;
2157 }
2158
2159 gdb_assert (WIFSTOPPED (status));
8817a6f2 2160 lp->stopped = 1;
d6b0e80f 2161
ca2163eb
PA
2162 /* Handle GNU/Linux's syscall SIGTRAPs. */
2163 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2164 {
2165 /* No longer need the sysgood bit. The ptrace event ends up
2166 recorded in lp->waitstatus if we care for it. We can carry
2167 on handling the event like a regular SIGTRAP from here
2168 on. */
2169 status = W_STOPCODE (SIGTRAP);
2170 if (linux_handle_syscall_trap (lp, 1))
2171 return wait_lwp (lp);
2172 }
2173
d6b0e80f 2174 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2175 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2176 && linux_is_extended_waitstatus (status))
d6b0e80f
AC
2177 {
2178 if (debug_linux_nat)
2179 fprintf_unfiltered (gdb_stdlog,
2180 "WL: Handling extended status 0x%06x\n",
2181 status);
3d799a95 2182 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
2183 return wait_lwp (lp);
2184 }
2185
2186 return status;
2187}
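/* Illustrative aside, not part of this file: the non-hanging wait
   pattern used above boils down to polling with WNOHANG and sleeping
   in sigsuspend until the next SIGCHLD, so a zombie thread-group
   leader can never wedge us inside waitpid.  A condensed sketch;
   SUSPEND_MASK is assumed to be a signal mask with SIGCHLD
   unblocked, as in the surrounding file:  */

#define _GNU_SOURCE		/* for __WALL */
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>

static pid_t
wait_without_hanging (pid_t lwp, int *status, const sigset_t *suspend_mask)
{
  for (;;)
    {
      pid_t ret = waitpid (lwp, status, __WALL | WNOHANG);

      if (ret != 0)
        return ret;		/* An event arrived, or an error.  */
      sigsuspend (suspend_mask);	/* Sleep until the next SIGCHLD.  */
    }
}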
2188
2189/* Send a SIGSTOP to LP. */
2190
2191static int
2192stop_callback (struct lwp_info *lp, void *data)
2193{
2194 if (!lp->stopped && !lp->signalled)
2195 {
2196 int ret;
2197
2198 if (debug_linux_nat)
2199 {
2200 fprintf_unfiltered (gdb_stdlog,
2201 "SC: kill %s **<SIGSTOP>**\n",
2202 target_pid_to_str (lp->ptid));
2203 }
2204 errno = 0;
dfd4cc63 2205 ret = kill_lwp (ptid_get_lwp (lp->ptid), SIGSTOP);
d6b0e80f
AC
2206 if (debug_linux_nat)
2207 {
2208 fprintf_unfiltered (gdb_stdlog,
2209 "SC: lwp kill %d %s\n",
2210 ret,
2211 errno ? safe_strerror (errno) : "ERRNO-OK");
2212 }
2213
2214 lp->signalled = 1;
2215 gdb_assert (lp->status == 0);
2216 }
2217
2218 return 0;
2219}
2220
7b50312a
PA
2221/* Request a stop on LWP. */
2222
2223void
2224linux_stop_lwp (struct lwp_info *lwp)
2225{
2226 stop_callback (lwp, NULL);
2227}
2228
57380f4e 2229/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2230
2231static int
57380f4e
DJ
2232linux_nat_has_pending_sigint (int pid)
2233{
2234 sigset_t pending, blocked, ignored;
57380f4e
DJ
2235
2236 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2237
2238 if (sigismember (&pending, SIGINT)
2239 && !sigismember (&ignored, SIGINT))
2240 return 1;
2241
2242 return 0;
2243}
2244
2245/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2246
2247static int
2248set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2249{
57380f4e
DJ
2250 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2251 flag to consume the next one. */
2252 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2253 && WSTOPSIG (lp->status) == SIGINT)
2254 lp->status = 0;
2255 else
2256 lp->ignore_sigint = 1;
2257
2258 return 0;
2259}
2260
2261/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2262 This function is called after we know the LWP has stopped; if the LWP
2263 stopped before the expected SIGINT was delivered, then it will never have
2264 arrived. Also, if the signal was delivered to a shared queue and consumed
2265 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2266
57380f4e
DJ
2267static void
2268maybe_clear_ignore_sigint (struct lwp_info *lp)
2269{
2270 if (!lp->ignore_sigint)
2271 return;
2272
dfd4cc63 2273 if (!linux_nat_has_pending_sigint (ptid_get_lwp (lp->ptid)))
57380f4e
DJ
2274 {
2275 if (debug_linux_nat)
2276 fprintf_unfiltered (gdb_stdlog,
2277 "MCIS: Clearing bogus flag for %s\n",
2278 target_pid_to_str (lp->ptid));
2279 lp->ignore_sigint = 0;
2280 }
2281}
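/* Illustrative aside, not part of this file:
   linux_nat_has_pending_sigint above relies on
   linux_proc_pending_signals, which reads the pending-signal masks
   out of procfs.  A hedged sketch of the same idea (the real code
   also consults the ignored mask): the SigPnd and ShdPnd lines of
   /proc/<pid>/status are hexadecimal masks with bit (SIG - 1) set
   for each queued signal.  */

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int
lwp_has_pending_sigint (int pid)
{
  char path[64], line[256];
  FILE *f;
  int pending = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;
  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "SigPnd:", 7) == 0 || strncmp (line, "ShdPnd:", 7) == 0)
      {
        unsigned long long mask = strtoull (line + 7, NULL, 16);

        if (mask & (1ULL << (SIGINT - 1)))
          pending = 1;
      }
  fclose (f);
  return pending;
}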
2282
ebec9a0f
PA
2283/* Fetch the possible triggered data watchpoint info and store it in
2284 LP.
2285
2286 On some archs, like x86, that use debug registers to set
2287 watchpoints, it's possible that the way to know which watched
2288 address trapped is to check the register that is used to select
2289 which address to watch. Problem is, between setting the watchpoint
2290 and reading back which data address trapped, the user may change
2291 the set of watchpoints, and, as a consequence, GDB changes the
2292 debug registers in the inferior. To avoid reading back a stale
2293 stopped-data-address when that happens, we cache in LP the fact
2294 that a watchpoint trapped, and the corresponding data address, as
2295 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2296 registers meanwhile, we have the cached data we can rely on. */
2297
2298static void
2299save_sigtrap (struct lwp_info *lp)
2300{
2301 struct cleanup *old_chain;
2302
2303 if (linux_ops->to_stopped_by_watchpoint == NULL)
2304 {
2305 lp->stopped_by_watchpoint = 0;
2306 return;
2307 }
2308
2309 old_chain = save_inferior_ptid ();
2310 inferior_ptid = lp->ptid;
2311
6a109b6b 2312 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint (linux_ops);
ebec9a0f
PA
2313
2314 if (lp->stopped_by_watchpoint)
2315 {
2316 if (linux_ops->to_stopped_data_address != NULL)
2317 lp->stopped_data_address_p =
2318 linux_ops->to_stopped_data_address (&current_target,
2319 &lp->stopped_data_address);
2320 else
2321 lp->stopped_data_address_p = 0;
2322 }
2323
2324 do_cleanups (old_chain);
2325}
2326
2327/* See save_sigtrap. */
2328
2329static int
6a109b6b 2330linux_nat_stopped_by_watchpoint (struct target_ops *ops)
ebec9a0f
PA
2331{
2332 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2333
2334 gdb_assert (lp != NULL);
2335
2336 return lp->stopped_by_watchpoint;
2337}
2338
2339static int
2340linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2341{
2342 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2343
2344 gdb_assert (lp != NULL);
2345
2346 *addr_p = lp->stopped_data_address;
2347
2348 return lp->stopped_data_address_p;
2349}
2350
26ab7092
JK
2351/* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
2352
2353static int
2354sigtrap_is_event (int status)
2355{
2356 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2357}
2358
2359/* SIGTRAP-like events recognizer. */
2360
2361static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2362
00390b84
JK
2363/* Check for SIGTRAP-like events in LP. */
2364
2365static int
2366linux_nat_lp_status_is_event (struct lwp_info *lp)
2367{
2368 /* We check for lp->waitstatus in addition to lp->status, because we can
2369 have pending process exits recorded in lp->status
2370 and W_EXITCODE(0,0) == 0. We should probably have an additional
2371 lp->status_p flag. */
2372
2373 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2374 && linux_nat_status_is_event (lp->status));
2375}
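/* Illustrative aside, not part of this file: the W_EXITCODE(0,0)
   ambiguity mentioned above in a nutshell -- a clean exit encodes to
   the integer 0, the same value used for "no status stored", which
   is why lp->waitstatus has to be consulted as well.  The macro is
   spelled out locally to keep the sketch self-contained.  */

#include <assert.h>
#include <sys/wait.h>

#define MY_W_EXITCODE(ret, sig) (((ret) << 8) | (sig))

int
main (void)
{
  int clean_exit = MY_W_EXITCODE (0, 0);

  assert (clean_exit == 0);	/* Indistinguishable from "no status".  */
  assert (WIFEXITED (clean_exit) && WEXITSTATUS (clean_exit) == 0);
  return 0;
}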
2376
26ab7092
JK
2377/* Set alternative SIGTRAP-like events recognizer. If
2378 breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
2379 applied. */
2380
2381void
2382linux_nat_set_status_is_event (struct target_ops *t,
2383 int (*status_is_event) (int status))
2384{
2385 linux_nat_status_is_event = status_is_event;
2386}
2387
57380f4e
DJ
2388/* Wait until LP is stopped. */
2389
2390static int
2391stop_wait_callback (struct lwp_info *lp, void *data)
2392{
c9657e70 2393 struct inferior *inf = find_inferior_ptid (lp->ptid);
6c95b8df
PA
2394
2395 /* If this is a vfork parent, bail out, it is not going to report
2396 any SIGSTOP until the vfork is done with. */
2397 if (inf->vfork_child != NULL)
2398 return 0;
2399
d6b0e80f
AC
2400 if (!lp->stopped)
2401 {
2402 int status;
2403
2404 status = wait_lwp (lp);
2405 if (status == 0)
2406 return 0;
2407
57380f4e
DJ
2408 if (lp->ignore_sigint && WIFSTOPPED (status)
2409 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2410 {
57380f4e 2411 lp->ignore_sigint = 0;
d6b0e80f
AC
2412
2413 errno = 0;
dfd4cc63 2414 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
8817a6f2 2415 lp->stopped = 0;
d6b0e80f
AC
2416 if (debug_linux_nat)
2417 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2418 "PTRACE_CONT %s, 0, 0 (%s) "
2419 "(discarding SIGINT)\n",
d6b0e80f
AC
2420 target_pid_to_str (lp->ptid),
2421 errno ? safe_strerror (errno) : "OK");
2422
57380f4e 2423 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2424 }
2425
57380f4e
DJ
2426 maybe_clear_ignore_sigint (lp);
2427
d6b0e80f
AC
2428 if (WSTOPSIG (status) != SIGSTOP)
2429 {
e5ef252a 2430 /* The thread was stopped with a signal other than SIGSTOP. */
7feb7d06 2431
e5ef252a
PA
2432 save_sigtrap (lp);
2433
2434 if (debug_linux_nat)
2435 fprintf_unfiltered (gdb_stdlog,
2436 "SWC: Pending event %s in %s\n",
2437 status_to_str ((int) status),
2438 target_pid_to_str (lp->ptid));
2439
2440 /* Save the sigtrap event. */
2441 lp->status = status;
e5ef252a 2442 gdb_assert (lp->signalled);
d6b0e80f
AC
2443 }
2444 else
2445 {
2446 /* We caught the SIGSTOP that we intended to catch, so
2447 there's no SIGSTOP pending. */
e5ef252a
PA
2448
2449 if (debug_linux_nat)
2450 fprintf_unfiltered (gdb_stdlog,
2451 "SWC: Delayed SIGSTOP caught for %s.\n",
2452 target_pid_to_str (lp->ptid));
2453
e5ef252a
PA
2454 /* Reset SIGNALLED only after the stop_wait_callback call
2455 above as it does gdb_assert on SIGNALLED. */
d6b0e80f
AC
2456 lp->signalled = 0;
2457 }
2458 }
2459
2460 return 0;
2461}
2462
d6b0e80f
AC
2463/* Return non-zero if LP has a wait status pending. */
2464
2465static int
2466status_callback (struct lwp_info *lp, void *data)
2467{
2468 /* Only report a pending wait status if we pretend that this has
2469 indeed been resumed. */
ca2163eb
PA
2470 if (!lp->resumed)
2471 return 0;
2472
2473 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2474 {
2475 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
766062f6 2476 or a pending process exit. Note that `W_EXITCODE(0,0) ==
ca2163eb
PA
2477 0', so a clean process exit can not be stored pending in
2478 lp->status, it is indistinguishable from
2479 no-pending-status. */
2480 return 1;
2481 }
2482
2483 if (lp->status != 0)
2484 return 1;
2485
2486 return 0;
d6b0e80f
AC
2487}
2488
2489/* Return non-zero if LP isn't stopped. */
2490
2491static int
2492running_callback (struct lwp_info *lp, void *data)
2493{
25289eb2
PA
2494 return (!lp->stopped
2495 || ((lp->status != 0
2496 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2497 && lp->resumed));
d6b0e80f
AC
2498}
2499
2500/* Count the LWPs that have had events. */
2501
2502static int
2503count_events_callback (struct lwp_info *lp, void *data)
2504{
2505 int *count = data;
2506
2507 gdb_assert (count != NULL);
2508
e09490f1 2509 /* Count only resumed LWPs that have a SIGTRAP event pending. */
00390b84 2510 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
2511 (*count)++;
2512
2513 return 0;
2514}
2515
2516/* Select the LWP (if any) that is currently being single-stepped. */
2517
2518static int
2519select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2520{
25289eb2
PA
2521 if (lp->last_resume_kind == resume_step
2522 && lp->status != 0)
d6b0e80f
AC
2523 return 1;
2524 else
2525 return 0;
2526}
2527
2528/* Select the Nth LWP that has had a SIGTRAP event. */
2529
2530static int
2531select_event_lwp_callback (struct lwp_info *lp, void *data)
2532{
2533 int *selector = data;
2534
2535 gdb_assert (selector != NULL);
2536
1777feb0 2537 /* Select only resumed LWPs that have a SIGTRAP event pending. */
00390b84 2538 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
2539 if ((*selector)-- == 0)
2540 return 1;
2541
2542 return 0;
2543}
2544
710151dd
PA
2545static int
2546cancel_breakpoint (struct lwp_info *lp)
2547{
2548 /* Arrange for a breakpoint to be hit again later. We don't keep
2549 the SIGTRAP status and don't forward the SIGTRAP signal to the
2550 LWP. We will handle the current event, eventually we will resume
2551 this LWP, and this breakpoint will trap again.
2552
2553 If we do not do this, then we run the risk that the user will
2554 delete or disable the breakpoint, but the LWP will have already
2555 tripped on it. */
2556
515630c5
UW
2557 struct regcache *regcache = get_thread_regcache (lp->ptid);
2558 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2559 CORE_ADDR pc;
2560
118e6252 2561 pc = regcache_read_pc (regcache) - target_decr_pc_after_break (gdbarch);
6c95b8df 2562 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
710151dd
PA
2563 {
2564 if (debug_linux_nat)
2565 fprintf_unfiltered (gdb_stdlog,
2566 "CB: Push back breakpoint for %s\n",
2567 target_pid_to_str (lp->ptid));
2568
2569 /* Back up the PC if necessary. */
118e6252 2570 if (target_decr_pc_after_break (gdbarch))
515630c5
UW
2571 regcache_write_pc (regcache, pc);
2572
710151dd
PA
2573 return 1;
2574 }
2575 return 0;
2576}
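/* Illustrative aside, not part of this file: the "push back the
   breakpoint" trick above depends on the architecture reporting a PC
   past the breakpoint instruction.  An x86-64-specific sketch of the
   same rewind done directly with ptrace (GDB itself goes through
   regcache_write_pc and gdbarch_decr_pc_after_break):  */

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

static void
rewind_pc_after_int3 (pid_t lwp)
{
  struct user_regs_struct regs;

  ptrace (PTRACE_GETREGS, lwp, 0, &regs);
  regs.rip -= 1;		/* int3 is a single-byte instruction.  */
  ptrace (PTRACE_SETREGS, lwp, 0, &regs);
}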
2577
d6b0e80f
AC
2578static int
2579cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2580{
2581 struct lwp_info *event_lp = data;
2582
2583 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2584 if (lp == event_lp)
2585 return 0;
2586
2587 /* If a LWP other than the LWP that we're reporting an event for has
2588 hit a GDB breakpoint (as opposed to some random trap signal),
2589 then just arrange for it to hit it again later. We don't keep
2590 the SIGTRAP status and don't forward the SIGTRAP signal to the
2591 LWP. We will handle the current event, eventually we will resume
2592 all LWPs, and this one will get its breakpoint trap again.
2593
2594 If we do not do this, then we run the risk that the user will
2595 delete or disable the breakpoint, but the LWP will have already
2596 tripped on it. */
2597
00390b84 2598 if (linux_nat_lp_status_is_event (lp)
710151dd
PA
2599 && cancel_breakpoint (lp))
2600 /* Throw away the SIGTRAP. */
2601 lp->status = 0;
d6b0e80f
AC
2602
2603 return 0;
2604}
2605
2606/* Select one LWP out of those that have events pending. */
2607
2608static void
d90e17a7 2609select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2610{
2611 int num_events = 0;
2612 int random_selector;
2613 struct lwp_info *event_lp;
2614
ac264b3b 2615 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2616 (*orig_lp)->status = *status;
2617
2618 /* Give preference to any LWP that is being single-stepped. */
d90e17a7
PA
2619 event_lp = iterate_over_lwps (filter,
2620 select_singlestep_lwp_callback, NULL);
d6b0e80f
AC
2621 if (event_lp != NULL)
2622 {
2623 if (debug_linux_nat)
2624 fprintf_unfiltered (gdb_stdlog,
2625 "SEL: Select single-step %s\n",
2626 target_pid_to_str (event_lp->ptid));
2627 }
2628 else
2629 {
2630 /* No single-stepping LWP. Select one at random, out of those
2631 which have had SIGTRAP events. */
2632
2633 /* First see how many SIGTRAP events we have. */
d90e17a7 2634 iterate_over_lwps (filter, count_events_callback, &num_events);
d6b0e80f
AC
2635
2636 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2637 random_selector = (int)
2638 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2639
2640 if (debug_linux_nat && num_events > 1)
2641 fprintf_unfiltered (gdb_stdlog,
2642 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2643 num_events, random_selector);
2644
d90e17a7
PA
2645 event_lp = iterate_over_lwps (filter,
2646 select_event_lwp_callback,
d6b0e80f
AC
2647 &random_selector);
2648 }
2649
2650 if (event_lp != NULL)
2651 {
2652 /* Switch the event LWP. */
2653 *orig_lp = event_lp;
2654 *status = event_lp->status;
2655 }
2656
2657 /* Flush the wait status for the event LWP. */
2658 (*orig_lp)->status = 0;
2659}
2660
2661/* Return non-zero if LP has been resumed. */
2662
2663static int
2664resumed_callback (struct lwp_info *lp, void *data)
2665{
2666 return lp->resumed;
2667}
2668
12d9289a
PA
2669/* Stop an active thread, verify it still exists, then resume it. If
2670 the thread ends up with a pending status, then it is not resumed,
2671 and *DATA (really a pointer to int), is set. */
d6b0e80f
AC
2672
2673static int
2674stop_and_resume_callback (struct lwp_info *lp, void *data)
2675{
12d9289a
PA
2676 int *new_pending_p = data;
2677
25289eb2 2678 if (!lp->stopped)
d6b0e80f 2679 {
25289eb2
PA
2680 ptid_t ptid = lp->ptid;
2681
d6b0e80f
AC
2682 stop_callback (lp, NULL);
2683 stop_wait_callback (lp, NULL);
25289eb2
PA
2684
2685 /* Resume if the lwp still exists, and the core wanted it
2686 running. */
12d9289a
PA
2687 lp = find_lwp_pid (ptid);
2688 if (lp != NULL)
25289eb2 2689 {
12d9289a
PA
2690 if (lp->last_resume_kind == resume_stop
2691 && lp->status == 0)
2692 {
2693 /* The core wanted the LWP to stop. Even if it stopped
2694 cleanly (with SIGSTOP), leave the event pending. */
2695 if (debug_linux_nat)
2696 fprintf_unfiltered (gdb_stdlog,
2697 "SARC: core wanted LWP %ld stopped "
2698 "(leaving SIGSTOP pending)\n",
dfd4cc63 2699 ptid_get_lwp (lp->ptid));
12d9289a
PA
2700 lp->status = W_STOPCODE (SIGSTOP);
2701 }
2702
2703 if (lp->status == 0)
2704 {
2705 if (debug_linux_nat)
2706 fprintf_unfiltered (gdb_stdlog,
2707 "SARC: re-resuming LWP %ld\n",
dfd4cc63 2708 ptid_get_lwp (lp->ptid));
e5ef252a 2709 resume_lwp (lp, lp->step, GDB_SIGNAL_0);
12d9289a
PA
2710 }
2711 else
2712 {
2713 if (debug_linux_nat)
2714 fprintf_unfiltered (gdb_stdlog,
2715 "SARC: not re-resuming LWP %ld "
2716 "(has pending)\n",
dfd4cc63 2717 ptid_get_lwp (lp->ptid));
12d9289a
PA
2718 if (new_pending_p)
2719 *new_pending_p = 1;
2720 }
25289eb2 2721 }
d6b0e80f
AC
2722 }
2723 return 0;
2724}
2725
02f3fc28 2726/* Check if we should go on and pass this event to common code.
12d9289a
PA
2727 Return the affected lwp if we are, or NULL otherwise. If we stop
2728 all lwps temporarily, we may end up with new pending events in some
2729 other lwp. In that case set *NEW_PENDING_P to true. */
2730
02f3fc28 2731static struct lwp_info *
0e5bf2a8 2732linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
02f3fc28
PA
2733{
2734 struct lwp_info *lp;
89a5711c 2735 int event = linux_ptrace_get_extended_event (status);
02f3fc28 2736
12d9289a
PA
2737 *new_pending_p = 0;
2738
02f3fc28
PA
2739 lp = find_lwp_pid (pid_to_ptid (lwpid));
2740
2741 /* Check for stop events reported by a process we didn't already
2742 know about - anything not already in our LWP list.
2743
2744 If we're expecting to receive stopped processes after
2745 fork, vfork, and clone events, then we'll just add the
2746 new one to our list and go back to waiting for the event
2747 to be reported - the stopped process might be returned
0e5bf2a8
PA
2748 from waitpid before or after the event is.
2749
2750 But note the case of a non-leader thread exec'ing after the
2751 leader having exited, and gone from our lists. The non-leader
2752 thread changes its tid to the tgid. */
2753
2754 if (WIFSTOPPED (status) && lp == NULL
89a5711c 2755 && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
0e5bf2a8
PA
2756 {
2757 /* A multi-thread exec after we had seen the leader exiting. */
2758 if (debug_linux_nat)
2759 fprintf_unfiltered (gdb_stdlog,
2760 "LLW: Re-adding thread group leader LWP %d.\n",
2761 lwpid);
2762
dfd4cc63 2763 lp = add_lwp (ptid_build (lwpid, lwpid, 0));
0e5bf2a8
PA
2764 lp->stopped = 1;
2765 lp->resumed = 1;
2766 add_thread (lp->ptid);
2767 }
2768
02f3fc28
PA
2769 if (WIFSTOPPED (status) && !lp)
2770 {
84636d28 2771 add_to_pid_list (&stopped_pids, lwpid, status);
02f3fc28
PA
2772 return NULL;
2773 }
2774
2775 /* Make sure we don't report an event for the exit of an LWP not in
1777feb0 2776 our list, i.e. not part of the current process. This can happen
fd62cb89 2777 if we detach from a program we originally forked and then it
02f3fc28
PA
2778 exits. */
2779 if (!WIFSTOPPED (status) && !lp)
2780 return NULL;
2781
8817a6f2
PA
2782 /* This LWP is stopped now. (And if dead, this prevents it from
2783 ever being continued.) */
2784 lp->stopped = 1;
2785
ca2163eb
PA
2786 /* Handle GNU/Linux's syscall SIGTRAPs. */
2787 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2788 {
2789 /* No longer need the sysgood bit. The ptrace event ends up
2790 recorded in lp->waitstatus if we care for it. We can carry
2791 on handling the event like a regular SIGTRAP from here
2792 on. */
2793 status = W_STOPCODE (SIGTRAP);
2794 if (linux_handle_syscall_trap (lp, 0))
2795 return NULL;
2796 }
02f3fc28 2797
ca2163eb 2798 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2799 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2800 && linux_is_extended_waitstatus (status))
02f3fc28
PA
2801 {
2802 if (debug_linux_nat)
2803 fprintf_unfiltered (gdb_stdlog,
2804 "LLW: Handling extended status 0x%06x\n",
2805 status);
2806 if (linux_handle_extended_wait (lp, status, 0))
2807 return NULL;
2808 }
2809
26ab7092 2810 if (linux_nat_status_is_event (status))
da559b09 2811 save_sigtrap (lp);
ca2163eb 2812
02f3fc28 2813 /* Check if the thread has exited. */
d90e17a7 2814 if ((WIFEXITED (status) || WIFSIGNALED (status))
dfd4cc63 2815 && num_lwps (ptid_get_pid (lp->ptid)) > 1)
02f3fc28 2816 {
9db03742
JB
2817 /* If this is the main thread, we must stop all threads and verify
2818 if they are still alive. This is because in the nptl thread model
2819 on Linux 2.4, there is no signal issued for exiting LWPs
02f3fc28
PA
2820 other than the main thread. We only get the main thread exit
2821 signal once all child threads have already exited. If we
2822 stop all the threads and use the stop_wait_callback to check
2823 if they have exited we can determine whether this signal
2824 should be ignored or whether it means the end of the debugged
2825 application, regardless of which threading model is being
5d3b6af6 2826 used. */
dfd4cc63 2827 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
02f3fc28 2828 {
dfd4cc63 2829 iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
12d9289a 2830 stop_and_resume_callback, new_pending_p);
02f3fc28
PA
2831 }
2832
2833 if (debug_linux_nat)
2834 fprintf_unfiltered (gdb_stdlog,
2835 "LLW: %s exited.\n",
2836 target_pid_to_str (lp->ptid));
2837
dfd4cc63 2838 if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
9db03742
JB
2839 {
2840 /* If there is at least one more LWP, then the exit signal
2841 was not the end of the debugged application and should be
2842 ignored. */
2843 exit_lwp (lp);
2844 return NULL;
2845 }
02f3fc28
PA
2846 }
2847
2848 /* Check if the current LWP has previously exited. In the nptl
2849 thread model, LWPs other than the main thread do not issue
2850 signals when they exit so we must check whenever the thread has
2851 stopped. A similar check is made in stop_wait_callback(). */
dfd4cc63 2852 if (num_lwps (ptid_get_pid (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
02f3fc28 2853 {
dfd4cc63 2854 ptid_t ptid = pid_to_ptid (ptid_get_pid (lp->ptid));
d90e17a7 2855
02f3fc28
PA
2856 if (debug_linux_nat)
2857 fprintf_unfiltered (gdb_stdlog,
2858 "LLW: %s exited.\n",
2859 target_pid_to_str (lp->ptid));
2860
2861 exit_lwp (lp);
2862
2863 /* Make sure there is at least one thread running. */
d90e17a7 2864 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
02f3fc28
PA
2865
2866 /* Discard the event. */
2867 return NULL;
2868 }
2869
2870 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2871 an attempt to stop an LWP. */
2872 if (lp->signalled
2873 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2874 {
2875 if (debug_linux_nat)
2876 fprintf_unfiltered (gdb_stdlog,
2877 "LLW: Delayed SIGSTOP caught for %s.\n",
2878 target_pid_to_str (lp->ptid));
2879
02f3fc28
PA
2880 lp->signalled = 0;
2881
25289eb2
PA
2882 if (lp->last_resume_kind != resume_stop)
2883 {
2884 /* This is a delayed SIGSTOP. */
02f3fc28 2885
25289eb2
PA
2886 registers_changed ();
2887
7b50312a
PA
2888 if (linux_nat_prepare_to_resume != NULL)
2889 linux_nat_prepare_to_resume (lp);
dfd4cc63
LM
2890 linux_ops->to_resume (linux_ops,
2891 pid_to_ptid (ptid_get_lwp (lp->ptid)),
2892 lp->step, GDB_SIGNAL_0);
25289eb2
PA
2893 if (debug_linux_nat)
2894 fprintf_unfiltered (gdb_stdlog,
2895 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2896 lp->step ?
2897 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2898 target_pid_to_str (lp->ptid));
02f3fc28 2899
25289eb2
PA
2900 lp->stopped = 0;
2901 gdb_assert (lp->resumed);
02f3fc28 2902
25289eb2
PA
2903 /* Discard the event. */
2904 return NULL;
2905 }
02f3fc28
PA
2906 }
2907
57380f4e
DJ
2908 /* Make sure we don't report a SIGINT that we have already displayed
2909 for another thread. */
2910 if (lp->ignore_sigint
2911 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
2912 {
2913 if (debug_linux_nat)
2914 fprintf_unfiltered (gdb_stdlog,
2915 "LLW: Delayed SIGINT caught for %s.\n",
2916 target_pid_to_str (lp->ptid));
2917
2918 /* This is a delayed SIGINT. */
2919 lp->ignore_sigint = 0;
2920
2921 registers_changed ();
7b50312a
PA
2922 if (linux_nat_prepare_to_resume != NULL)
2923 linux_nat_prepare_to_resume (lp);
dfd4cc63 2924 linux_ops->to_resume (linux_ops, pid_to_ptid (ptid_get_lwp (lp->ptid)),
a493e3e2 2925 lp->step, GDB_SIGNAL_0);
57380f4e
DJ
2926 if (debug_linux_nat)
2927 fprintf_unfiltered (gdb_stdlog,
2928 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
2929 lp->step ?
2930 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2931 target_pid_to_str (lp->ptid));
2932
2933 lp->stopped = 0;
2934 gdb_assert (lp->resumed);
2935
2936 /* Discard the event. */
2937 return NULL;
2938 }
2939
02f3fc28
PA
2940 /* An interesting event. */
2941 gdb_assert (lp);
ca2163eb 2942 lp->status = status;
02f3fc28
PA
2943 return lp;
2944}
2945
0e5bf2a8
PA
2946/* Detect zombie thread group leaders, and "exit" them. We can't reap
2947 their exits until all other threads in the group have exited. */
2948
2949static void
2950check_zombie_leaders (void)
2951{
2952 struct inferior *inf;
2953
2954 ALL_INFERIORS (inf)
2955 {
2956 struct lwp_info *leader_lp;
2957
2958 if (inf->pid == 0)
2959 continue;
2960
2961 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
2962 if (leader_lp != NULL
2963 /* Check if there are other threads in the group, as we may
2964 have raced with the inferior simply exiting. */
2965 && num_lwps (inf->pid) > 1
5f572dec 2966 && linux_proc_pid_is_zombie (inf->pid))
0e5bf2a8
PA
2967 {
2968 if (debug_linux_nat)
2969 fprintf_unfiltered (gdb_stdlog,
2970 "CZL: Thread group leader %d zombie "
2971 "(it exited, or another thread execd).\n",
2972 inf->pid);
2973
2974 /* A leader zombie can mean one of two things:
2975
2976 - It exited, and there's an exit status pending
2977 available, or only the leader exited (not the whole
2978 program). In the latter case, we can't waitpid the
2979 leader's exit status until all other threads are gone.
2980
2981 - There are 3 or more threads in the group, and a thread
2982 other than the leader exec'd. On an exec, the Linux
2983 kernel destroys all other threads (except the execing
2984 one) in the thread group, and resets the execing thread's
2985 tid to the tgid. No exit notification is sent for the
2986 execing thread -- from the ptracer's perspective, it
2987 appears as though the execing thread just vanishes.
2988 Until we reap all other threads except the leader and the
2989 execing thread, the leader will be zombie, and the
2990 execing thread will be in `D (disc sleep)'. As soon as
2991 all other threads are reaped, the execing thread changes
2992 its tid to the tgid, and the previous (zombie) leader
2993 vanishes, giving place to the "new" leader. We could try
2994 distinguishing the exit and exec cases, by waiting once
2995 more, and seeing if something comes out, but it doesn't
2996 sound useful. The previous leader _does_ go away, and
2997 we'll re-add the new one once we see the exec event
2998 (which is just the same as what would happen if the
2999 previous leader did exit voluntarily before some other
3000 thread execs). */
3001
3002 if (debug_linux_nat)
3003 fprintf_unfiltered (gdb_stdlog,
3004 "CZL: Thread group leader %d vanished.\n",
3005 inf->pid);
3006 exit_lwp (leader_lp);
3007 }
3008 }
3009}
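/* Illustrative aside, not part of this file: the zombie test above
   is done by linux_proc_pid_is_zombie; the underlying procfs fact is
   that a zombie task reports "State: Z" in /proc/<pid>/status.  A
   hedged sketch of that check:  */

#include <stdio.h>
#include <string.h>

static int
pid_looks_zombie (int pid)
{
  char path[64], line[256];
  FILE *f;
  int zombie = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;
  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
        const char *p = line + 6;

        while (*p == ' ' || *p == '\t')
          p++;
        zombie = (*p == 'Z');
        break;
      }
  fclose (f);
  return zombie;
}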
3010
d6b0e80f 3011static ptid_t
7feb7d06 3012linux_nat_wait_1 (struct target_ops *ops,
47608cb1
PA
3013 ptid_t ptid, struct target_waitstatus *ourstatus,
3014 int target_options)
d6b0e80f 3015{
fc9b8e47 3016 sigset_t prev_mask;
4b60df3d 3017 enum resume_kind last_resume_kind;
12d9289a 3018 struct lwp_info *lp;
12d9289a 3019 int status;
d6b0e80f 3020
01124a23 3021 if (debug_linux_nat)
b84876c2
PA
3022 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3023
f973ed9c
DJ
3024 /* The first time we get here after starting a new inferior, we may
3025 not have added it to the LWP list yet - this is the earliest
3026 moment at which we know its PID. */
d90e17a7 3027 if (ptid_is_pid (inferior_ptid))
f973ed9c 3028 {
27c9d204
PA
3029 /* Upgrade the main thread's ptid. */
3030 thread_change_ptid (inferior_ptid,
dfd4cc63
LM
3031 ptid_build (ptid_get_pid (inferior_ptid),
3032 ptid_get_pid (inferior_ptid), 0));
27c9d204 3033
26cb8b7c 3034 lp = add_initial_lwp (inferior_ptid);
f973ed9c
DJ
3035 lp->resumed = 1;
3036 }
3037
12696c10 3038 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
7feb7d06 3039 block_child_signals (&prev_mask);
d6b0e80f
AC
3040
3041retry:
d90e17a7
PA
3042 lp = NULL;
3043 status = 0;
d6b0e80f
AC
3044
3045 /* First check if there is a LWP with a wait status pending. */
0e5bf2a8 3046 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
d6b0e80f 3047 {
0e5bf2a8 3048 /* Any LWP in the PTID group that's been resumed will do. */
d90e17a7 3049 lp = iterate_over_lwps (ptid, status_callback, NULL);
d6b0e80f
AC
3050 if (lp)
3051 {
ca2163eb 3052 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3053 fprintf_unfiltered (gdb_stdlog,
3054 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3055 status_to_str (lp->status),
d6b0e80f
AC
3056 target_pid_to_str (lp->ptid));
3057 }
d6b0e80f 3058 }
dfd4cc63 3059 else if (ptid_lwp_p (ptid))
d6b0e80f
AC
3060 {
3061 if (debug_linux_nat)
3062 fprintf_unfiltered (gdb_stdlog,
3063 "LLW: Waiting for specific LWP %s.\n",
3064 target_pid_to_str (ptid));
3065
3066 /* We have a specific LWP to check. */
3067 lp = find_lwp_pid (ptid);
3068 gdb_assert (lp);
d6b0e80f 3069
ca2163eb 3070 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3071 fprintf_unfiltered (gdb_stdlog,
3072 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3073 status_to_str (lp->status),
d6b0e80f
AC
3074 target_pid_to_str (lp->ptid));
3075
d90e17a7
PA
3076 /* We check for lp->waitstatus in addition to lp->status,
3077 because we can have pending process exits recorded in
3078 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3079 an additional lp->status_p flag. */
ca2163eb 3080 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
d90e17a7 3081 lp = NULL;
d6b0e80f
AC
3082 }
3083
b84876c2
PA
3084 if (!target_can_async_p ())
3085 {
3086 /* Causes SIGINT to be passed on to the attached process. */
3087 set_sigint_trap ();
b84876c2 3088 }
d6b0e80f 3089
0e5bf2a8 3090 /* But if we don't find a pending event, we'll have to wait. */
7feb7d06 3091
d90e17a7 3092 while (lp == NULL)
d6b0e80f
AC
3093 {
3094 pid_t lwpid;
3095
0e5bf2a8
PA
3096 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3097 quirks:
3098
3099 - If the thread group leader exits while other threads in the
3100 thread group still exist, waitpid(TGID, ...) hangs. That
3101 waitpid won't return an exit status until the other threads
3102 in the group are reaped.
3103
3104 - When a non-leader thread execs, that thread just vanishes
3105 without reporting an exit (so we'd hang if we waited for it
3106 explicitly in that case). The exec event is reported to
3107 the TGID pid. */
3108
3109 errno = 0;
3110 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3111 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3112 lwpid = my_waitpid (-1, &status, WNOHANG);
3113
3114 if (debug_linux_nat)
3115 fprintf_unfiltered (gdb_stdlog,
3116 "LNW: waitpid(-1, ...) returned %d, %s\n",
3117 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
b84876c2 3118
d6b0e80f
AC
3119 if (lwpid > 0)
3120 {
12d9289a
PA
3121 /* If this is true, then we paused LWPs momentarily, and may
3122 now have pending events to handle. */
3123 int new_pending;
3124
d6b0e80f
AC
3125 if (debug_linux_nat)
3126 {
3127 fprintf_unfiltered (gdb_stdlog,
3128 "LLW: waitpid %ld received %s\n",
3129 (long) lwpid, status_to_str (status));
3130 }
3131
0e5bf2a8 3132 lp = linux_nat_filter_event (lwpid, status, &new_pending);
d90e17a7 3133
33355866
JK
3134 /* STATUS is now no longer valid, use LP->STATUS instead. */
3135 status = 0;
3136
0e5bf2a8 3137 if (lp && !ptid_match (lp->ptid, ptid))
d6b0e80f 3138 {
e3e9f5a2
PA
3139 gdb_assert (lp->resumed);
3140
d90e17a7 3141 if (debug_linux_nat)
2f693f9d
SDJ
3142 fprintf_unfiltered (gdb_stdlog,
3143 "LWP %ld got an event %06x, "
3144 "leaving pending.\n",
3145 ptid_get_lwp (lp->ptid), lp->status);
d90e17a7 3146
ca2163eb 3147 if (WIFSTOPPED (lp->status))
d90e17a7 3148 {
ca2163eb 3149 if (WSTOPSIG (lp->status) != SIGSTOP)
d90e17a7 3150 {
e3e9f5a2
PA
3151 /* Cancel breakpoint hits. The breakpoint may
3152 be removed before we fetch events from this
3153 process to report to the core. It is best
3154 not to assume the moribund breakpoints
3155 heuristic always handles these cases --- it
3156 could be too many events go through to the
3157 core before this one is handled. All-stop
3158 always cancels breakpoint hits in all
3159 threads. */
3160 if (non_stop
00390b84 3161 && linux_nat_lp_status_is_event (lp)
e3e9f5a2
PA
3162 && cancel_breakpoint (lp))
3163 {
3164 /* Throw away the SIGTRAP. */
3165 lp->status = 0;
3166
3167 if (debug_linux_nat)
2f693f9d
SDJ
3168 fprintf_unfiltered (gdb_stdlog,
3169 "LLW: LWP %ld hit a "
3170 "breakpoint while "
3171 "waiting for another "
3172 "process; "
3173 "cancelled it\n",
3174 ptid_get_lwp (lp->ptid));
e3e9f5a2 3175 }
d90e17a7
PA
3176 }
3177 else
8817a6f2 3178 lp->signalled = 0;
d90e17a7 3179 }
33355866 3180 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
d90e17a7
PA
3181 {
3182 if (debug_linux_nat)
2f693f9d
SDJ
3183 fprintf_unfiltered (gdb_stdlog,
3184 "Process %ld exited while stopping "
3185 "LWPs\n",
3186 ptid_get_lwp (lp->ptid));
d90e17a7
PA
3187
3188 /* This was the last lwp in the process. Since
3189 events are serialized to GDB core, and we can't
3190 report this one right now, but GDB core and the
3191 other target layers will want to be notified
3192 about the exit code/signal, leave the status
3193 pending for the next time we're able to report
3194 it. */
d90e17a7 3195
d90e17a7
PA
3196 /* Dead LWPs aren't expected to report a pending
3197 sigstop. */
3198 lp->signalled = 0;
3199
3200 /* Store the pending event in the waitstatus as
3201 well, because W_EXITCODE(0,0) == 0. */
ca2163eb 3202 store_waitstatus (&lp->waitstatus, lp->status);
d90e17a7
PA
3203 }
3204
3205 /* Keep looking. */
3206 lp = NULL;
d6b0e80f
AC
3207 }
3208
0e5bf2a8 3209 if (new_pending)
d90e17a7 3210 {
0e5bf2a8
PA
3211 /* Some LWP now has a pending event. Go all the way
3212 back to check it. */
3213 goto retry;
3214 }
12d9289a 3215
0e5bf2a8
PA
3216 if (lp)
3217 {
3218 /* We got an event to report to the core. */
3219 break;
d90e17a7 3220 }
0e5bf2a8
PA
3221
3222 /* Retry until nothing comes out of waitpid. A single
3223 SIGCHLD can indicate more than one child stopped. */
3224 continue;
d6b0e80f
AC
3225 }
3226
0e5bf2a8
PA
3227 /* Check for zombie thread group leaders. Those can't be reaped
3228 until all other threads in the thread group are. */
3229 check_zombie_leaders ();
d6b0e80f 3230
0e5bf2a8
PA
3231 /* If there are no resumed children left, bail. We'd be stuck
3232 forever in the sigsuspend call below otherwise. */
3233 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3234 {
3235 if (debug_linux_nat)
3236 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
b84876c2 3237
0e5bf2a8 3238 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
b84876c2 3239
0e5bf2a8
PA
3240 if (!target_can_async_p ())
3241 clear_sigint_trap ();
b84876c2 3242
0e5bf2a8
PA
3243 restore_child_signals_mask (&prev_mask);
3244 return minus_one_ptid;
d6b0e80f 3245 }
28736962 3246
0e5bf2a8
PA
3247 /* No interesting event to report to the core. */
3248
3249 if (target_options & TARGET_WNOHANG)
3250 {
01124a23 3251 if (debug_linux_nat)
28736962
PA
3252 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3253
0e5bf2a8 3254 ourstatus->kind = TARGET_WAITKIND_IGNORE;
28736962
PA
3255 restore_child_signals_mask (&prev_mask);
3256 return minus_one_ptid;
3257 }
d6b0e80f
AC
3258
3259 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3260 gdb_assert (lp == NULL);
0e5bf2a8
PA
3261
3262 /* Block until we get an event reported with SIGCHLD. */
d36bf488
DE
3263 if (debug_linux_nat)
3264 fprintf_unfiltered (gdb_stdlog, "LNW: about to sigsuspend\n");
0e5bf2a8 3265 sigsuspend (&suspend_mask);
d6b0e80f
AC
3266 }
3267
b84876c2 3268 if (!target_can_async_p ())
d26b5354 3269 clear_sigint_trap ();
d6b0e80f
AC
3270
3271 gdb_assert (lp);
3272
ca2163eb
PA
3273 status = lp->status;
3274 lp->status = 0;
3275
d6b0e80f
AC
3276 /* Don't report signals that GDB isn't interested in, such as
3277 signals that are neither printed nor stopped upon. Stopping all
3278 threads can be a bit time-consuming so if we want decent
3279 performance with heavily multi-threaded programs, especially when
3280 they're using a high frequency timer, we'd better avoid it if we
3281 can. */
3282
3283 if (WIFSTOPPED (status))
3284 {
2ea28649 3285 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
d6b0e80f 3286
2455069d
UW
3287 /* When using hardware single-step, we need to report every signal.
3288 Otherwise, signals in pass_mask may be short-circuited. */
d539ed7e 3289 if (!lp->step
2455069d 3290 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
d6b0e80f
AC
3291 {
3292 /* FIXME: kettenis/2001-06-06: Should we resume all threads
3293 here? It is not clear we should. GDB may not expect
3294 other threads to run. On the other hand, not resuming
3295 newly attached threads may cause an unwanted delay in
3296 getting them running. */
3297 registers_changed ();
7b50312a
PA
3298 if (linux_nat_prepare_to_resume != NULL)
3299 linux_nat_prepare_to_resume (lp);
dfd4cc63
LM
3300 linux_ops->to_resume (linux_ops,
3301 pid_to_ptid (ptid_get_lwp (lp->ptid)),
10d6c8cd 3302 lp->step, signo);
d6b0e80f
AC
3303 if (debug_linux_nat)
3304 fprintf_unfiltered (gdb_stdlog,
3305 "LLW: %s %s, %s (preempt 'handle')\n",
3306 lp->step ?
3307 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3308 target_pid_to_str (lp->ptid),
a493e3e2 3309 (signo != GDB_SIGNAL_0
2ea28649 3310 ? strsignal (gdb_signal_to_host (signo))
423ec54c 3311 : "0"));
d6b0e80f 3312 lp->stopped = 0;
d6b0e80f
AC
3313 goto retry;
3314 }
3315
1ad15515 3316 if (!non_stop)
d6b0e80f 3317 {
1ad15515
PA
3318 /* Only do the below in all-stop, as we currently use SIGINT
3319 to implement target_stop (see linux_nat_stop) in
3320 non-stop. */
a493e3e2 3321 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
1ad15515
PA
3322 {
3323 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3324 forwarded to the entire process group, that is, all LWPs
3325 will receive it - unless they're using CLONE_THREAD to
3326 share signals. Since we only want to report it once, we
3327 mark it as ignored for all LWPs except this one. */
d90e17a7
PA
3328 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3329 set_ignore_sigint, NULL);
1ad15515
PA
3330 lp->ignore_sigint = 0;
3331 }
3332 else
3333 maybe_clear_ignore_sigint (lp);
d6b0e80f
AC
3334 }
3335 }
3336
3337 /* This LWP is stopped now. */
3338 lp->stopped = 1;
3339
3340 if (debug_linux_nat)
3341 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3342 status_to_str (status), target_pid_to_str (lp->ptid));
3343
4c28f408
PA
3344 if (!non_stop)
3345 {
3346 /* Now stop all other LWP's ... */
d90e17a7 3347 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
4c28f408
PA
3348
3349 /* ... and wait until all of them have reported back that
3350 they're no longer running. */
d90e17a7 3351 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
4c28f408
PA
3352
3353 /* If we're not waiting for a specific LWP, choose an event LWP
3354 from among those that have had events. Giving equal priority
3355 to all LWPs that have had events helps prevent
3356 starvation. */
0e5bf2a8 3357 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
d90e17a7 3358 select_event_lwp (ptid, &lp, &status);
d6b0e80f 3359
e3e9f5a2
PA
3360 /* Now that we've selected our final event LWP, cancel any
3361 breakpoints in other LWPs that have hit a GDB breakpoint.
3362 See the comment in cancel_breakpoints_callback to find out
3363 why. */
3364 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3365
4b60df3d
PA
3366 /* We'll need this to determine whether to report a SIGSTOP as
3367 GDB_SIGNAL_0. Need to take a copy because
3368 resume_clear_callback clears it. */
3369 last_resume_kind = lp->last_resume_kind;
3370
e3e9f5a2
PA
3371 /* In all-stop, from the core's perspective, all LWPs are now
3372 stopped until a new resume action is sent over. */
3373 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3374 }
3375 else
25289eb2 3376 {
4b60df3d
PA
3377 /* See above. */
3378 last_resume_kind = lp->last_resume_kind;
3379 resume_clear_callback (lp, NULL);
25289eb2 3380 }
d6b0e80f 3381
26ab7092 3382 if (linux_nat_status_is_event (status))
d6b0e80f 3383 {
d6b0e80f
AC
3384 if (debug_linux_nat)
3385 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3386 "LLW: trap ptid is %s.\n",
3387 target_pid_to_str (lp->ptid));
d6b0e80f 3388 }
d6b0e80f
AC
3389
3390 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3391 {
3392 *ourstatus = lp->waitstatus;
3393 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3394 }
3395 else
3396 store_waitstatus (ourstatus, status);
3397
01124a23 3398 if (debug_linux_nat)
b84876c2
PA
3399 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3400
7feb7d06 3401 restore_child_signals_mask (&prev_mask);
1e225492 3402
4b60df3d 3403 if (last_resume_kind == resume_stop
25289eb2
PA
3404 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3405 && WSTOPSIG (status) == SIGSTOP)
3406 {
3407 /* A thread that has been requested to stop by GDB with
3408 target_stop, and it stopped cleanly, so report as SIG0. The
3409 use of SIGSTOP is an implementation detail. */
a493e3e2 3410 ourstatus->value.sig = GDB_SIGNAL_0;
25289eb2
PA
3411 }
3412
1e225492
JK
3413 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3414 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3415 lp->core = -1;
3416 else
2e794194 3417 lp->core = linux_common_core_of_thread (lp->ptid);
1e225492 3418
f973ed9c 3419 return lp->ptid;
d6b0e80f
AC
3420}
3421
e3e9f5a2
PA
3422/* Resume LWPs that are currently stopped without any pending status
3423 to report, but are resumed from the core's perspective. */
3424
3425static int
3426resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3427{
3428 ptid_t *wait_ptid_p = data;
3429
3430 if (lp->stopped
3431 && lp->resumed
3432 && lp->status == 0
3433 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3434 {
336060f3
PA
3435 struct regcache *regcache = get_thread_regcache (lp->ptid);
3436 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3437 CORE_ADDR pc = regcache_read_pc (regcache);
3438
e3e9f5a2
PA
3439 gdb_assert (is_executing (lp->ptid));
3440
3441 /* Don't bother if there's a breakpoint at PC that we'd hit
3442 immediately, and we're not waiting for this LWP. */
3443 if (!ptid_match (lp->ptid, *wait_ptid_p))
3444 {
e3e9f5a2
PA
3445 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3446 return 0;
3447 }
3448
3449 if (debug_linux_nat)
3450 fprintf_unfiltered (gdb_stdlog,
336060f3
PA
3451 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
3452 target_pid_to_str (lp->ptid),
3453 paddress (gdbarch, pc),
3454 lp->step);
e3e9f5a2 3455
336060f3 3456 registers_changed ();
7b50312a
PA
3457 if (linux_nat_prepare_to_resume != NULL)
3458 linux_nat_prepare_to_resume (lp);
dfd4cc63 3459 linux_ops->to_resume (linux_ops, pid_to_ptid (ptid_get_lwp (lp->ptid)),
a493e3e2 3460 lp->step, GDB_SIGNAL_0);
e3e9f5a2 3461 lp->stopped = 0;
e3e9f5a2
PA
3462 lp->stopped_by_watchpoint = 0;
3463 }
3464
3465 return 0;
3466}
3467
7feb7d06
PA
3468static ptid_t
3469linux_nat_wait (struct target_ops *ops,
47608cb1
PA
3470 ptid_t ptid, struct target_waitstatus *ourstatus,
3471 int target_options)
7feb7d06
PA
3472{
3473 ptid_t event_ptid;
3474
3475 if (debug_linux_nat)
09826ec5
PA
3476 {
3477 char *options_string;
3478
3479 options_string = target_options_to_string (target_options);
3480 fprintf_unfiltered (gdb_stdlog,
3481 "linux_nat_wait: [%s], [%s]\n",
3482 target_pid_to_str (ptid),
3483 options_string);
3484 xfree (options_string);
3485 }
7feb7d06
PA
3486
3487 /* Flush the async file first. */
3488 if (target_can_async_p ())
3489 async_file_flush ();
3490
e3e9f5a2
PA
3491 /* Resume LWPs that are currently stopped without any pending status
3492 to report, but are resumed from the core's perspective. LWPs get
3493 in this state if we find them stopping at a time we're not
3494 interested in reporting the event (target_wait on a
3495 specific_process, for example, see linux_nat_wait_1), and
3496 meanwhile the event became uninteresting. Don't bother resuming
3497 LWPs we're not going to wait for if they'd stop immediately. */
3498 if (non_stop)
3499 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3500
47608cb1 3501 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
7feb7d06
PA
3502
3503 /* If we requested any event, and something came out, assume there
3504 may be more. If we requested a specific lwp or process, also
3505 assume there may be more. */
3506 if (target_can_async_p ()
6953d224
PA
3507 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3508 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
7feb7d06
PA
3509 || !ptid_equal (ptid, minus_one_ptid)))
3510 async_file_mark ();
3511
3512 /* Get ready for the next event. */
3513 if (target_can_async_p ())
3514 target_async (inferior_event_handler, 0);
3515
3516 return event_ptid;
3517}
3518
d6b0e80f
AC
3519static int
3520kill_callback (struct lwp_info *lp, void *data)
3521{
ed731959
JK
3522 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3523
3524 errno = 0;
69ff6be5 3525 kill_lwp (ptid_get_lwp (lp->ptid), SIGKILL);
ed731959 3526 if (debug_linux_nat)
57745c90
PA
3527 {
3528 int save_errno = errno;
3529
3530 fprintf_unfiltered (gdb_stdlog,
3531 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
3532 target_pid_to_str (lp->ptid),
3533 save_errno ? safe_strerror (save_errno) : "OK");
3534 }
ed731959
JK
3535
3536 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3537
d6b0e80f 3538 errno = 0;
dfd4cc63 3539 ptrace (PTRACE_KILL, ptid_get_lwp (lp->ptid), 0, 0);
d6b0e80f 3540 if (debug_linux_nat)
57745c90
PA
3541 {
3542 int save_errno = errno;
3543
3544 fprintf_unfiltered (gdb_stdlog,
3545 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3546 target_pid_to_str (lp->ptid),
3547 save_errno ? safe_strerror (save_errno) : "OK");
3548 }
d6b0e80f
AC
3549
3550 return 0;
3551}
3552
3553static int
3554kill_wait_callback (struct lwp_info *lp, void *data)
3555{
3556 pid_t pid;
3557
3558 /* We must make sure that there are no pending events (delayed
3559 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3560 program doesn't interfere with any following debugging session. */
3561
3562 /* For cloned processes we must check both with __WCLONE and
3563 without, since the exit status of a cloned process isn't reported
3564 with __WCLONE. */
3565 if (lp->cloned)
3566 {
3567 do
3568 {
dfd4cc63 3569 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, __WCLONE);
e85a822c 3570 if (pid != (pid_t) -1)
d6b0e80f 3571 {
e85a822c
DJ
3572 if (debug_linux_nat)
3573 fprintf_unfiltered (gdb_stdlog,
3574 "KWC: wait %s received unknown.\n",
3575 target_pid_to_str (lp->ptid));
3576 /* The Linux kernel sometimes fails to kill a thread
3577 completely after PTRACE_KILL; that goes from the stop
3578 point in do_fork out to the one in
3579 get_signal_to_deliver and waits again. So kill it
3580 again. */
3581 kill_callback (lp, NULL);
d6b0e80f
AC
3582 }
3583 }
dfd4cc63 3584 while (pid == ptid_get_lwp (lp->ptid));
d6b0e80f
AC
3585
3586 gdb_assert (pid == -1 && errno == ECHILD);
3587 }
3588
3589 do
3590 {
dfd4cc63 3591 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, 0);
e85a822c 3592 if (pid != (pid_t) -1)
d6b0e80f 3593 {
e85a822c
DJ
3594 if (debug_linux_nat)
3595 fprintf_unfiltered (gdb_stdlog,
3596 "KWC: wait %s received unknown.\n",
3597 target_pid_to_str (lp->ptid));
3598 /* See the call to kill_callback above. */
3599 kill_callback (lp, NULL);
d6b0e80f
AC
3600 }
3601 }
dfd4cc63 3602 while (pid == ptid_get_lwp (lp->ptid));
d6b0e80f
AC
3603
3604 gdb_assert (pid == -1 && errno == ECHILD);
3605 return 0;
3606}
3607
3608static void
7d85a9c0 3609linux_nat_kill (struct target_ops *ops)
d6b0e80f 3610{
f973ed9c
DJ
3611 struct target_waitstatus last;
3612 ptid_t last_ptid;
3613 int status;
d6b0e80f 3614
f973ed9c
DJ
3615 /* If we're stopped while forking and we haven't followed yet,
3616 kill the other task. We need to do this first because the
3617 parent will be sleeping if this is a vfork. */
d6b0e80f 3618
f973ed9c 3619 get_last_target_status (&last_ptid, &last);
d6b0e80f 3620
f973ed9c
DJ
3621 if (last.kind == TARGET_WAITKIND_FORKED
3622 || last.kind == TARGET_WAITKIND_VFORKED)
3623 {
dfd4cc63 3624 ptrace (PT_KILL, ptid_get_pid (last.value.related_pid), 0, 0);
f973ed9c 3625 wait (&status);
26cb8b7c
PA
3626
3627 /* Let the arch-specific native code know this process is
3628 gone. */
dfd4cc63 3629 linux_nat_forget_process (ptid_get_pid (last.value.related_pid));
f973ed9c
DJ
3630 }
3631
3632 if (forks_exist_p ())
7feb7d06 3633 linux_fork_killall ();
f973ed9c
DJ
3634 else
3635 {
d90e17a7 3636 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
e0881a8e 3637
4c28f408
PA
3638 /* Stop all threads before killing them, since ptrace requires
3639 that the thread is stopped to successfully PTRACE_KILL. */
d90e17a7 3640 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
3641 /* ... and wait until all of them have reported back that
3642 they're no longer running. */
d90e17a7 3643 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 3644
f973ed9c 3645 /* Kill all LWP's ... */
d90e17a7 3646 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
3647
3648 /* ... and wait until we've flushed all events. */
d90e17a7 3649 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
3650 }
3651
3652 target_mourn_inferior ();
d6b0e80f
AC
3653}
3654
3655static void
136d6dae 3656linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 3657{
26cb8b7c
PA
3658 int pid = ptid_get_pid (inferior_ptid);
3659
3660 purge_lwp_list (pid);
d6b0e80f 3661
f973ed9c 3662 if (! forks_exist_p ())
d90e17a7
PA
3663 /* Normal case, no other forks available. */
3664 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
3665 else
3666 /* Multi-fork case. The current inferior_ptid has exited, but
3667 there are other viable forks to debug. Delete the exiting
3668 one and context-switch to the first available. */
3669 linux_fork_mourn_inferior ();
26cb8b7c
PA
3670
3671 /* Let the arch-specific native code know this process is gone. */
3672 linux_nat_forget_process (pid);
d6b0e80f
AC
3673}
3674
5b009018
PA
3675/* Convert a native/host siginfo object, into/from the siginfo in the
3676 layout of the inferiors' architecture. */
3677
3678static void
a5362b9a 3679siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5b009018
PA
3680{
3681 int done = 0;
3682
3683 if (linux_nat_siginfo_fixup != NULL)
3684 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3685
3686 /* If there was no callback, or the callback didn't do anything,
3687 then just do a straight memcpy. */
3688 if (!done)
3689 {
3690 if (direction == 1)
a5362b9a 3691 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5b009018 3692 else
a5362b9a 3693 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5b009018
PA
3694 }
3695}
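/* Illustrative sketch (not part of linux-nat.c) of why a plain memcpy is
   not always enough here.  Assuming x86 layouts: a 64-bit host siginfo_t
   keeps its extra-field union at offset 16 with an 8-byte si_addr, while
   a 32-bit inferior expects the union at offset 12 with a 4-byte si_addr,
   so a fixup callback has to re-pack the members one by one, roughly: */

struct example_compat_siginfo		/* Simplified 32-bit layout (assumed).  */
{
  int si_signo;
  int si_errno;
  int si_code;
  unsigned int si_addr32;		/* 32-bit pointer value.  */
};

static void
example_siginfo_to_compat (const siginfo_t *from,
			   struct example_compat_siginfo *to)
{
  /* Copy the fixed header fields, then narrow the fault address.  */
  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;
  to->si_addr32 = (unsigned int) (unsigned long) from->si_addr;
}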
3696
9b409511 3697static enum target_xfer_status
4aa995e1
PA
3698linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
3699 const char *annex, gdb_byte *readbuf,
9b409511
YQ
3700 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3701 ULONGEST *xfered_len)
4aa995e1 3702{
4aa995e1 3703 int pid;
a5362b9a
TS
3704 siginfo_t siginfo;
3705 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1
PA
3706
3707 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3708 gdb_assert (readbuf || writebuf);
3709
dfd4cc63 3710 pid = ptid_get_lwp (inferior_ptid);
4aa995e1 3711 if (pid == 0)
dfd4cc63 3712 pid = ptid_get_pid (inferior_ptid);
4aa995e1
PA
3713
3714 if (offset > sizeof (siginfo))
2ed4b548 3715 return TARGET_XFER_E_IO;
4aa995e1
PA
3716
3717 errno = 0;
3718 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3719 if (errno != 0)
2ed4b548 3720 return TARGET_XFER_E_IO;
4aa995e1 3721
5b009018
PA
3722 /* When GDB is built as a 64-bit application, ptrace writes into
3723 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3724 inferior with a 64-bit GDB should look the same as debugging it
3725 with a 32-bit GDB, we need to convert it. GDB core always sees
3726 the converted layout, so any read/write will have to be done
3727 post-conversion. */
3728 siginfo_fixup (&siginfo, inf_siginfo, 0);
3729
4aa995e1
PA
3730 if (offset + len > sizeof (siginfo))
3731 len = sizeof (siginfo) - offset;
3732
3733 if (readbuf != NULL)
5b009018 3734 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
3735 else
3736 {
5b009018
PA
3737 memcpy (inf_siginfo + offset, writebuf, len);
3738
3739 /* Convert back to ptrace layout before flushing it out. */
3740 siginfo_fixup (&siginfo, inf_siginfo, 1);
3741
4aa995e1
PA
3742 errno = 0;
3743 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3744 if (errno != 0)
2ed4b548 3745 return TARGET_XFER_E_IO;
4aa995e1
PA
3746 }
3747
9b409511
YQ
3748 *xfered_len = len;
3749 return TARGET_XFER_OK;
4aa995e1
PA
3750}
3751
9b409511 3752static enum target_xfer_status
10d6c8cd
DJ
3753linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3754 const char *annex, gdb_byte *readbuf,
3755 const gdb_byte *writebuf,
9b409511 3756 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
d6b0e80f 3757{
4aa995e1 3758 struct cleanup *old_chain;
9b409511 3759 enum target_xfer_status xfer;
d6b0e80f 3760
4aa995e1
PA
3761 if (object == TARGET_OBJECT_SIGNAL_INFO)
3762 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
9b409511 3763 offset, len, xfered_len);
4aa995e1 3764
c35b1492
PA
3765 /* The target is connected but no live inferior is selected. Pass
3766 this request down to a lower stratum (e.g., the executable
3767 file). */
3768 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
9b409511 3769 return TARGET_XFER_EOF;
c35b1492 3770
4aa995e1
PA
3771 old_chain = save_inferior_ptid ();
3772
dfd4cc63
LM
3773 if (ptid_lwp_p (inferior_ptid))
3774 inferior_ptid = pid_to_ptid (ptid_get_lwp (inferior_ptid));
d6b0e80f 3775
10d6c8cd 3776 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511 3777 offset, len, xfered_len);
d6b0e80f
AC
3778
3779 do_cleanups (old_chain);
3780 return xfer;
3781}
3782
3783static int
28439f5e 3784linux_thread_alive (ptid_t ptid)
d6b0e80f 3785{
8c6a60d1 3786 int err, tmp_errno;
4c28f408 3787
dfd4cc63 3788 gdb_assert (ptid_lwp_p (ptid));
d6b0e80f 3789
4c28f408
PA
3790 /* Send signal 0 instead of anything ptrace, because ptracing a
3791 running thread errors out claiming that the thread doesn't
3792 exist. */
dfd4cc63 3793 err = kill_lwp (ptid_get_lwp (ptid), 0);
8c6a60d1 3794 tmp_errno = errno;
d6b0e80f
AC
3795 if (debug_linux_nat)
3796 fprintf_unfiltered (gdb_stdlog,
4c28f408 3797 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 3798 target_pid_to_str (ptid),
8c6a60d1 3799 err ? safe_strerror (tmp_errno) : "OK");
9c0dd46b 3800
4c28f408 3801 if (err != 0)
d6b0e80f
AC
3802 return 0;
3803
3804 return 1;
3805}
3806
28439f5e
PA
3807static int
3808linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
3809{
3810 return linux_thread_alive (ptid);
3811}
3812
d6b0e80f 3813static char *
117de6a9 3814linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
3815{
3816 static char buf[64];
3817
dfd4cc63
LM
3818 if (ptid_lwp_p (ptid)
3819 && (ptid_get_pid (ptid) != ptid_get_lwp (ptid)
3820 || num_lwps (ptid_get_pid (ptid)) > 1))
d6b0e80f 3821 {
dfd4cc63 3822 snprintf (buf, sizeof (buf), "LWP %ld", ptid_get_lwp (ptid));
d6b0e80f
AC
3823 return buf;
3824 }
3825
3826 return normal_pid_to_str (ptid);
3827}
3828
4694da01 3829static char *
503a628d 3830linux_nat_thread_name (struct target_ops *self, struct thread_info *thr)
4694da01
TT
3831{
3832 int pid = ptid_get_pid (thr->ptid);
3833 long lwp = ptid_get_lwp (thr->ptid);
3834#define FORMAT "/proc/%d/task/%ld/comm"
3835 char buf[sizeof (FORMAT) + 30];
3836 FILE *comm_file;
3837 char *result = NULL;
3838
3839 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
614c279d 3840 comm_file = gdb_fopen_cloexec (buf, "r");
4694da01
TT
3841 if (comm_file)
3842 {
3843 /* Not exported by the kernel, so we define it here. */
3844#define COMM_LEN 16
3845 static char line[COMM_LEN + 1];
3846
3847 if (fgets (line, sizeof (line), comm_file))
3848 {
3849 char *nl = strchr (line, '\n');
3850
3851 if (nl)
3852 *nl = '\0';
3853 if (*line != '\0')
3854 result = line;
3855 }
3856
3857 fclose (comm_file);
3858 }
3859
3860#undef COMM_LEN
3861#undef FORMAT
3862
3863 return result;
3864}
3865
dba24537
AC
3866/* Accepts an integer PID; Returns a string representing a file that
3867 can be opened to get the symbols for the child process. */
3868
6d8fd2b7 3869static char *
8dd27370 3870linux_child_pid_to_exec_file (struct target_ops *self, int pid)
dba24537 3871{
b4ab256d
HZ
3872 static char buf[PATH_MAX];
3873 char name[PATH_MAX];
dba24537 3874
b4ab256d
HZ
3875 xsnprintf (name, PATH_MAX, "/proc/%d/exe", pid);
3876 memset (buf, 0, PATH_MAX);
3877 if (readlink (name, buf, PATH_MAX - 1) <= 0)
3878 strcpy (buf, name);
dba24537 3879
b4ab256d 3880 return buf;
dba24537
AC
3881}
3882
10d6c8cd
DJ
3883/* Implement the to_xfer_partial interface for memory reads using the /proc
3884 filesystem. Because we can use a single read() call for /proc, this
3885 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3886 but it doesn't support writes. */
3887
9b409511 3888static enum target_xfer_status
10d6c8cd
DJ
3889linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3890 const char *annex, gdb_byte *readbuf,
3891 const gdb_byte *writebuf,
9b409511 3892 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
dba24537 3893{
10d6c8cd
DJ
3894 LONGEST ret;
3895 int fd;
dba24537
AC
3896 char filename[64];
3897
10d6c8cd 3898 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
3899 return 0;
3900
3901 /* Don't bother for one word. */
3902 if (len < 3 * sizeof (long))
9b409511 3903 return TARGET_XFER_EOF;
dba24537
AC
3904
3905 /* We could keep this file open and cache it - possibly one per
3906 thread. That requires some juggling, but is even faster. */
cde33bf1
YQ
3907 xsnprintf (filename, sizeof filename, "/proc/%d/mem",
3908 ptid_get_pid (inferior_ptid));
614c279d 3909 fd = gdb_open_cloexec (filename, O_RDONLY | O_LARGEFILE, 0);
dba24537 3910 if (fd == -1)
9b409511 3911 return TARGET_XFER_EOF;
dba24537
AC
3912
3913 /* If pread64 is available, use it. It's faster if the kernel
3914 supports it (only one syscall), and it's 64-bit safe even on
3915 32-bit platforms (for instance, SPARC debugging a SPARC64
3916 application). */
3917#ifdef HAVE_PREAD64
10d6c8cd 3918 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 3919#else
10d6c8cd 3920 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
3921#endif
3922 ret = 0;
3923 else
3924 ret = len;
3925
3926 close (fd);
9b409511
YQ
3927
3928 if (ret == 0)
3929 return TARGET_XFER_EOF;
3930 else
3931 {
3932 *xfered_len = ret;
3933 return TARGET_XFER_OK;
3934 }
dba24537
AC
3935}
3936
efcbbd14
UW
3937
3938/* Enumerate spufs IDs for process PID. */
3939static LONGEST
b55e14c7 3940spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, ULONGEST len)
efcbbd14 3941{
f5656ead 3942 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
efcbbd14
UW
3943 LONGEST pos = 0;
3944 LONGEST written = 0;
3945 char path[128];
3946 DIR *dir;
3947 struct dirent *entry;
3948
3949 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
3950 dir = opendir (path);
3951 if (!dir)
3952 return -1;
3953
3954 rewinddir (dir);
3955 while ((entry = readdir (dir)) != NULL)
3956 {
3957 struct stat st;
3958 struct statfs stfs;
3959 int fd;
3960
3961 fd = atoi (entry->d_name);
3962 if (!fd)
3963 continue;
3964
3965 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
3966 if (stat (path, &st) != 0)
3967 continue;
3968 if (!S_ISDIR (st.st_mode))
3969 continue;
3970
3971 if (statfs (path, &stfs) != 0)
3972 continue;
3973 if (stfs.f_type != SPUFS_MAGIC)
3974 continue;
3975
3976 if (pos >= offset && pos + 4 <= offset + len)
3977 {
3978 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
3979 written += 4;
3980 }
3981 pos += 4;
3982 }
3983
3984 closedir (dir);
3985 return written;
3986}
3987
3988/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
3989 object type, using the /proc file system. */
9b409511
YQ
3990
3991static enum target_xfer_status
efcbbd14
UW
3992linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
3993 const char *annex, gdb_byte *readbuf,
3994 const gdb_byte *writebuf,
9b409511 3995 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
efcbbd14
UW
3996{
3997 char buf[128];
3998 int fd = 0;
3999 int ret = -1;
dfd4cc63 4000 int pid = ptid_get_pid (inferior_ptid);
efcbbd14
UW
4001
4002 if (!annex)
4003 {
4004 if (!readbuf)
2ed4b548 4005 return TARGET_XFER_E_IO;
efcbbd14 4006 else
9b409511
YQ
4007 {
4008 LONGEST l = spu_enumerate_spu_ids (pid, readbuf, offset, len);
4009
4010 if (l < 0)
4011 return TARGET_XFER_E_IO;
4012 else if (l == 0)
4013 return TARGET_XFER_EOF;
4014 else
4015 {
4016 *xfered_len = (ULONGEST) l;
4017 return TARGET_XFER_OK;
4018 }
4019 }
efcbbd14
UW
4020 }
4021
4022 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
614c279d 4023 fd = gdb_open_cloexec (buf, writebuf? O_WRONLY : O_RDONLY, 0);
efcbbd14 4024 if (fd <= 0)
2ed4b548 4025 return TARGET_XFER_E_IO;
efcbbd14
UW
4026
4027 if (offset != 0
4028 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4029 {
4030 close (fd);
9b409511 4031 return TARGET_XFER_EOF;
efcbbd14
UW
4032 }
4033
4034 if (writebuf)
4035 ret = write (fd, writebuf, (size_t) len);
4036 else if (readbuf)
4037 ret = read (fd, readbuf, (size_t) len);
4038
4039 close (fd);
9b409511
YQ
4040
4041 if (ret < 0)
4042 return TARGET_XFER_E_IO;
4043 else if (ret == 0)
4044 return TARGET_XFER_EOF;
4045 else
4046 {
4047 *xfered_len = (ULONGEST) ret;
4048 return TARGET_XFER_OK;
4049 }
efcbbd14
UW
4050}
4051
4052
dba24537
AC
4053/* Parse LINE as a signal set and add its set bits to SIGS. */
4054
4055static void
4056add_line_to_sigset (const char *line, sigset_t *sigs)
4057{
4058 int len = strlen (line) - 1;
4059 const char *p;
4060 int signum;
4061
4062 if (line[len] != '\n')
8a3fe4f8 4063 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4064
4065 p = line;
4066 signum = len * 4;
4067 while (len-- > 0)
4068 {
4069 int digit;
4070
4071 if (*p >= '0' && *p <= '9')
4072 digit = *p - '0';
4073 else if (*p >= 'a' && *p <= 'f')
4074 digit = *p - 'a' + 10;
4075 else
8a3fe4f8 4076 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4077
4078 signum -= 4;
4079
4080 if (digit & 1)
4081 sigaddset (sigs, signum + 1);
4082 if (digit & 2)
4083 sigaddset (sigs, signum + 2);
4084 if (digit & 4)
4085 sigaddset (sigs, signum + 3);
4086 if (digit & 8)
4087 sigaddset (sigs, signum + 4);
4088
4089 p++;
4090 }
4091}
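/* Worked example (illustrative, not part of linux-nat.c): the mask is
   scanned left to right with SIGNUM starting at 4 * len and dropping by 4
   before each hex digit, so the rightmost digit covers signals 1-4 and the
   leftmost the highest-numbered ones.  For the 16-digit line
   "0000000000000102\n", the 14th digit '1' is decoded with signum == 8
   (bit 1 -> signal 9, SIGKILL) and the 16th digit '2' with signum == 0
   (bit 2 -> signal 2, SIGINT).  A minimal use of the helper: */

static void
example_decode_sigpnd_line (void)
{
  sigset_t pending;

  sigemptyset (&pending);
  add_line_to_sigset ("0000000000000102\n", &pending);
  gdb_assert (sigismember (&pending, SIGINT));	/* From the trailing '2'.  */
  gdb_assert (sigismember (&pending, SIGKILL));	/* From the '1' digit.  */
}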
4092
4093/* Find process PID's pending signals from /proc/pid/status and set
4094 SIGS to match. */
4095
4096void
3e43a32a
MS
4097linux_proc_pending_signals (int pid, sigset_t *pending,
4098 sigset_t *blocked, sigset_t *ignored)
dba24537
AC
4099{
4100 FILE *procfile;
d8d2a3ee 4101 char buffer[PATH_MAX], fname[PATH_MAX];
7c8a8b04 4102 struct cleanup *cleanup;
dba24537
AC
4103
4104 sigemptyset (pending);
4105 sigemptyset (blocked);
4106 sigemptyset (ignored);
cde33bf1 4107 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
614c279d 4108 procfile = gdb_fopen_cloexec (fname, "r");
dba24537 4109 if (procfile == NULL)
8a3fe4f8 4110 error (_("Could not open %s"), fname);
7c8a8b04 4111 cleanup = make_cleanup_fclose (procfile);
dba24537 4112
d8d2a3ee 4113 while (fgets (buffer, PATH_MAX, procfile) != NULL)
dba24537
AC
4114 {
4115 /* Normal queued signals are on the SigPnd line in the status
4116 file. However, 2.6 kernels also have a "shared" pending
4117 queue for delivering signals to a thread group, so check for
4118 a ShdPnd line also.
4119
4120 Unfortunately some Red Hat kernels include the shared pending
4121 queue but not the ShdPnd status field. */
4122
4123 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4124 add_line_to_sigset (buffer + 8, pending);
4125 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4126 add_line_to_sigset (buffer + 8, pending);
4127 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4128 add_line_to_sigset (buffer + 8, blocked);
4129 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4130 add_line_to_sigset (buffer + 8, ignored);
4131 }
4132
7c8a8b04 4133 do_cleanups (cleanup);
dba24537
AC
4134}
4135
9b409511 4136static enum target_xfer_status
07e059b5 4137linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
e0881a8e 4138 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4139 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4140 ULONGEST *xfered_len)
07e059b5 4141{
07e059b5
VP
4142 gdb_assert (object == TARGET_OBJECT_OSDATA);
4143
9b409511
YQ
4144 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4145 if (*xfered_len == 0)
4146 return TARGET_XFER_EOF;
4147 else
4148 return TARGET_XFER_OK;
07e059b5
VP
4149}
4150
9b409511 4151static enum target_xfer_status
10d6c8cd
DJ
4152linux_xfer_partial (struct target_ops *ops, enum target_object object,
4153 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4154 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4155 ULONGEST *xfered_len)
10d6c8cd 4156{
9b409511 4157 enum target_xfer_status xfer;
10d6c8cd
DJ
4158
4159 if (object == TARGET_OBJECT_AUXV)
9f2982ff 4160 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
9b409511 4161 offset, len, xfered_len);
10d6c8cd 4162
07e059b5
VP
4163 if (object == TARGET_OBJECT_OSDATA)
4164 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
9b409511 4165 offset, len, xfered_len);
07e059b5 4166
efcbbd14
UW
4167 if (object == TARGET_OBJECT_SPU)
4168 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
9b409511 4169 offset, len, xfered_len);
efcbbd14 4170
8f313923
JK
4171 /* GDB calculates all the addresses in the possibly larger width of the address.
4172 Address width needs to be masked before its final use - either by
4173 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4174
4175 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4176
4177 if (object == TARGET_OBJECT_MEMORY)
4178 {
f5656ead 4179 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
8f313923
JK
4180
4181 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4182 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4183 }
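	  /* Worked example (illustrative, not part of the original source):
	     for a 32-bit inferior addr_bit is 32, so a sign-extended address
	     such as 0xffffffff08048000 is masked down to 0x08048000 before
	     being handed to the lower layers.  */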
4184
10d6c8cd 4185 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511
YQ
4186 offset, len, xfered_len);
4187 if (xfer != TARGET_XFER_EOF)
10d6c8cd
DJ
4188 return xfer;
4189
4190 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511 4191 offset, len, xfered_len);
10d6c8cd
DJ
4192}
4193
5808517f
YQ
4194static void
4195cleanup_target_stop (void *arg)
4196{
4197 ptid_t *ptid = (ptid_t *) arg;
4198
4199 gdb_assert (arg != NULL);
4200
4201 /* Unpause all */
a493e3e2 4202 target_resume (*ptid, 0, GDB_SIGNAL_0);
5808517f
YQ
4203}
4204
4205static VEC(static_tracepoint_marker_p) *
c686c57f
TT
4206linux_child_static_tracepoint_markers_by_strid (struct target_ops *self,
4207 const char *strid)
5808517f
YQ
4208{
4209 char s[IPA_CMD_BUF_SIZE];
4210 struct cleanup *old_chain;
4211 int pid = ptid_get_pid (inferior_ptid);
4212 VEC(static_tracepoint_marker_p) *markers = NULL;
4213 struct static_tracepoint_marker *marker = NULL;
4214 char *p = s;
4215 ptid_t ptid = ptid_build (pid, 0, 0);
4216
4217 /* Pause all */
4218 target_stop (ptid);
4219
4220 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4221 s[sizeof ("qTfSTM")] = 0;
4222
42476b70 4223 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4224
4225 old_chain = make_cleanup (free_current_marker, &marker);
4226 make_cleanup (cleanup_target_stop, &ptid);
4227
4228 while (*p++ == 'm')
4229 {
4230 if (marker == NULL)
4231 marker = XCNEW (struct static_tracepoint_marker);
4232
4233 do
4234 {
4235 parse_static_tracepoint_marker_definition (p, &p, marker);
4236
4237 if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4238 {
4239 VEC_safe_push (static_tracepoint_marker_p,
4240 markers, marker);
4241 marker = NULL;
4242 }
4243 else
4244 {
4245 release_static_tracepoint_marker (marker);
4246 memset (marker, 0, sizeof (*marker));
4247 }
4248 }
4249 while (*p++ == ','); /* comma-separated list */
4250
4251 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4252 s[sizeof ("qTsSTM")] = 0;
42476b70 4253 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4254 p = s;
4255 }
4256
4257 do_cleanups (old_chain);
4258
4259 return markers;
4260}
4261
e9efe249 4262/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
4263 it with local methods. */
4264
910122bf
UW
4265static void
4266linux_target_install_ops (struct target_ops *t)
10d6c8cd 4267{
6d8fd2b7 4268 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
eb73ad13 4269 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
6d8fd2b7 4270 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
eb73ad13 4271 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
6d8fd2b7 4272 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
eb73ad13 4273 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
a96d9b2e 4274 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
6d8fd2b7 4275 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 4276 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
4277 t->to_post_attach = linux_child_post_attach;
4278 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
4279
4280 super_xfer_partial = t->to_xfer_partial;
4281 t->to_xfer_partial = linux_xfer_partial;
5808517f
YQ
4282
4283 t->to_static_tracepoint_markers_by_strid
4284 = linux_child_static_tracepoint_markers_by_strid;
910122bf
UW
4285}
4286
4287struct target_ops *
4288linux_target (void)
4289{
4290 struct target_ops *t;
4291
4292 t = inf_ptrace_target ();
4293 linux_target_install_ops (t);
4294
4295 return t;
4296}
4297
4298struct target_ops *
7714d83a 4299linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
4300{
4301 struct target_ops *t;
4302
4303 t = inf_ptrace_trad_target (register_u_offset);
4304 linux_target_install_ops (t);
10d6c8cd 4305
10d6c8cd
DJ
4306 return t;
4307}
4308
b84876c2
PA
4309/* target_is_async_p implementation. */
4310
4311static int
6a109b6b 4312linux_nat_is_async_p (struct target_ops *ops)
b84876c2
PA
4313{
4314 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 4315 it explicitly with the "set target-async" command.
b84876c2 4316 Someday, linux will always be async. */
3dd5b83d 4317 return target_async_permitted;
b84876c2
PA
4318}
4319
4320/* target_can_async_p implementation. */
4321
4322static int
6a109b6b 4323linux_nat_can_async_p (struct target_ops *ops)
b84876c2
PA
4324{
4325 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 4326 it explicitly with the "set target-async" command.
b84876c2 4327 Someday, linux will always be async. */
3dd5b83d 4328 return target_async_permitted;
b84876c2
PA
4329}
4330
9908b566 4331static int
2a9a2795 4332linux_nat_supports_non_stop (struct target_ops *self)
9908b566
VP
4333{
4334 return 1;
4335}
4336
d90e17a7
PA
4337/* True if we want to support multi-process. To be removed when GDB
4338 supports multi-exec. */
4339
2277426b 4340int linux_multi_process = 1;
d90e17a7
PA
4341
4342static int
86ce2668 4343linux_nat_supports_multi_process (struct target_ops *self)
d90e17a7
PA
4344{
4345 return linux_multi_process;
4346}
4347
03583c20 4348static int
2bfc0540 4349linux_nat_supports_disable_randomization (struct target_ops *self)
03583c20
UW
4350{
4351#ifdef HAVE_PERSONALITY
4352 return 1;
4353#else
4354 return 0;
4355#endif
4356}
4357
b84876c2
PA
4358static int async_terminal_is_ours = 1;
4359
4d4ca2a1
DE
4360/* target_terminal_inferior implementation.
4361
4362 This is a wrapper around child_terminal_inferior to add async support. */
b84876c2
PA
4363
4364static void
d2f640d4 4365linux_nat_terminal_inferior (struct target_ops *self)
b84876c2
PA
4366{
4367 if (!target_is_async_p ())
4368 {
4369 /* Async mode is disabled. */
d6b64346 4370 child_terminal_inferior (self);
b84876c2
PA
4371 return;
4372 }
4373
d6b64346 4374 child_terminal_inferior (self);
b84876c2 4375
d9d2d8b6 4376 /* Calls to target_terminal_*() are meant to be idempotent. */
b84876c2
PA
4377 if (!async_terminal_is_ours)
4378 return;
4379
4380 delete_file_handler (input_fd);
4381 async_terminal_is_ours = 0;
4382 set_sigint_trap ();
4383}
4384
4d4ca2a1
DE
4385/* target_terminal_ours implementation.
4386
4387 This is a wrapper around child_terminal_ours to add async support (and
4388 implement the target_terminal_ours vs target_terminal_ours_for_output
4389 distinction). child_terminal_ours is currently no different than
4390 child_terminal_ours_for_output.
4391 We leave target_terminal_ours_for_output alone, leaving it to
4392 child_terminal_ours_for_output. */
b84876c2 4393
2c0b251b 4394static void
e3594fd1 4395linux_nat_terminal_ours (struct target_ops *self)
b84876c2
PA
4396{
4397 if (!target_is_async_p ())
4398 {
4399 /* Async mode is disabled. */
d6b64346 4400 child_terminal_ours (self);
b84876c2
PA
4401 return;
4402 }
4403
4404 /* GDB should never give the terminal to the inferior if the
4405 inferior is running in the background (run&, continue&, etc.),
4406 but claiming it back for GDB certainly should. */
d6b64346 4407 child_terminal_ours (self);
b84876c2 4408
b84876c2
PA
4409 if (async_terminal_is_ours)
4410 return;
4411
4412 clear_sigint_trap ();
4413 add_file_handler (input_fd, stdin_event_handler, 0);
4414 async_terminal_is_ours = 1;
4415}
4416
4417static void (*async_client_callback) (enum inferior_event_type event_type,
4418 void *context);
4419static void *async_client_context;
4420
7feb7d06
PA
4421/* SIGCHLD handler that serves two purposes: in non-stop/async mode
4422 it notifies the event loop whenever any child changes state, and
4423 it allows us to use sigsuspend in linux_nat_wait_1 above to wait
4424 for the arrival of a SIGCHLD. */
4425
b84876c2 4426static void
7feb7d06 4427sigchld_handler (int signo)
b84876c2 4428{
7feb7d06
PA
4429 int old_errno = errno;
4430
01124a23
DE
4431 if (debug_linux_nat)
4432 ui_file_write_async_safe (gdb_stdlog,
4433 "sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06
PA
4434
4435 if (signo == SIGCHLD
4436 && linux_nat_event_pipe[0] != -1)
4437 async_file_mark (); /* Let the event loop know that there are
4438 events to handle. */
4439
4440 errno = old_errno;
4441}
4442
4443/* Callback registered with the target events file descriptor. */
4444
4445static void
4446handle_target_event (int error, gdb_client_data client_data)
4447{
4448 (*async_client_callback) (INF_REG_EVENT, async_client_context);
4449}
4450
4451/* Create/destroy the target events pipe. Returns previous state. */
4452
4453static int
4454linux_async_pipe (int enable)
4455{
4456 int previous = (linux_nat_event_pipe[0] != -1);
4457
4458 if (previous != enable)
4459 {
4460 sigset_t prev_mask;
4461
12696c10
PA
4462 /* Block child signals while we create/destroy the pipe, as
4463 their handler writes to it. */
7feb7d06
PA
4464 block_child_signals (&prev_mask);
4465
4466 if (enable)
4467 {
614c279d 4468 if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
7feb7d06
PA
4469 internal_error (__FILE__, __LINE__,
4470 "creating event pipe failed.");
4471
4472 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4473 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4474 }
4475 else
4476 {
4477 close (linux_nat_event_pipe[0]);
4478 close (linux_nat_event_pipe[1]);
4479 linux_nat_event_pipe[0] = -1;
4480 linux_nat_event_pipe[1] = -1;
4481 }
4482
4483 restore_child_signals_mask (&prev_mask);
4484 }
4485
4486 return previous;
b84876c2
PA
4487}
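/* Standalone sketch of the "self-pipe trick" used by sigchld_handler and
   linux_async_pipe above (illustrative, not part of linux-nat.c; assumes
   only POSIX APIs).  The signal handler does nothing but write a byte to a
   non-blocking pipe; the event loop polls the read end, so all real work
   happens outside signal context.  A real implementation would install the
   handler with sigaction and SA_RESTART, as _initialize_linux_nat does.  */

#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <signal.h>
#include <unistd.h>

static int example_event_pipe[2];

static void
example_sigchld_handler (int signo)
{
  int old_errno = errno;

  /* Async-signal-safe: just record that something happened.  */
  (void) write (example_event_pipe[1], "+", 1);
  errno = old_errno;
}

static void
example_event_loop (void)
{
  struct pollfd pfd;

  if (pipe (example_event_pipe) != 0)
    return;
  fcntl (example_event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (example_event_pipe[1], F_SETFL, O_NONBLOCK);
  signal (SIGCHLD, example_sigchld_handler);

  pfd.fd = example_event_pipe[0];
  pfd.events = POLLIN;
  for (;;)
    {
      if (poll (&pfd, 1, -1) < 0)
	{
	  if (errno == EINTR)
	    continue;
	  break;
	}

      /* Drain the pipe, then reap children with waitpid (..., WNOHANG).  */
      {
	char buf[64];

	while (read (example_event_pipe[0], buf, sizeof buf) > 0)
	  ;
      }
    }
}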
4488
4489/* target_async implementation. */
4490
4491static void
6a109b6b
TT
4492linux_nat_async (struct target_ops *ops,
4493 void (*callback) (enum inferior_event_type event_type,
4494 void *context),
4495 void *context)
b84876c2 4496{
b84876c2
PA
4497 if (callback != NULL)
4498 {
4499 async_client_callback = callback;
4500 async_client_context = context;
7feb7d06
PA
4501 if (!linux_async_pipe (1))
4502 {
4503 add_file_handler (linux_nat_event_pipe[0],
4504 handle_target_event, NULL);
4505 /* There may be pending events to handle. Tell the event loop
4506 to poll them. */
4507 async_file_mark ();
4508 }
b84876c2
PA
4509 }
4510 else
4511 {
4512 async_client_callback = callback;
4513 async_client_context = context;
b84876c2 4514 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 4515 linux_async_pipe (0);
b84876c2
PA
4516 }
4517 return;
4518}
4519
a493e3e2 4520/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
252fbfc8
PA
4521 event came out. */
4522
4c28f408 4523static int
252fbfc8 4524linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4c28f408 4525{
d90e17a7 4526 if (!lwp->stopped)
252fbfc8 4527 {
d90e17a7
PA
4528 if (debug_linux_nat)
4529 fprintf_unfiltered (gdb_stdlog,
4530 "LNSL: running -> suspending %s\n",
4531 target_pid_to_str (lwp->ptid));
252fbfc8 4532
252fbfc8 4533
25289eb2
PA
4534 if (lwp->last_resume_kind == resume_stop)
4535 {
4536 if (debug_linux_nat)
4537 fprintf_unfiltered (gdb_stdlog,
4538 "linux-nat: already stopping LWP %ld at "
4539 "GDB's request\n",
4540 ptid_get_lwp (lwp->ptid));
4541 return 0;
4542 }
252fbfc8 4543
25289eb2
PA
4544 stop_callback (lwp, NULL);
4545 lwp->last_resume_kind = resume_stop;
d90e17a7
PA
4546 }
4547 else
4548 {
4549 /* Already known to be stopped; do nothing. */
252fbfc8 4550
d90e17a7
PA
4551 if (debug_linux_nat)
4552 {
e09875d4 4553 if (find_thread_ptid (lwp->ptid)->stop_requested)
3e43a32a
MS
4554 fprintf_unfiltered (gdb_stdlog,
4555 "LNSL: already stopped/stop_requested %s\n",
d90e17a7
PA
4556 target_pid_to_str (lwp->ptid));
4557 else
3e43a32a
MS
4558 fprintf_unfiltered (gdb_stdlog,
4559 "LNSL: already stopped/no "
4560 "stop_requested yet %s\n",
d90e17a7 4561 target_pid_to_str (lwp->ptid));
252fbfc8
PA
4562 }
4563 }
4c28f408
PA
4564 return 0;
4565}
4566
4567static void
1eab8a48 4568linux_nat_stop (struct target_ops *self, ptid_t ptid)
4c28f408
PA
4569{
4570 if (non_stop)
d90e17a7 4571 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4c28f408 4572 else
1eab8a48 4573 linux_ops->to_stop (linux_ops, ptid);
4c28f408
PA
4574}
4575
d90e17a7 4576static void
de90e03d 4577linux_nat_close (struct target_ops *self)
d90e17a7
PA
4578{
4579 /* Unregister from the event loop. */
9debeba0
DE
4580 if (linux_nat_is_async_p (self))
4581 linux_nat_async (self, NULL, NULL);
d90e17a7 4582
d90e17a7 4583 if (linux_ops->to_close)
de90e03d 4584 linux_ops->to_close (linux_ops);
6a3cb8e8
PA
4585
4586 super_close (self);
d90e17a7
PA
4587}
4588
c0694254
PA
4589/* When requests are passed down from the linux-nat layer to the
4590 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4591 used. The address space pointer is stored in the inferior object,
4592 but the common code that is passed such ptid can't tell whether
4593 lwpid is a "main" process id or not (it assumes so). We reverse
4594 look up the "main" process id from the lwp here. */
4595
70221824 4596static struct address_space *
c0694254
PA
4597linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
4598{
4599 struct lwp_info *lwp;
4600 struct inferior *inf;
4601 int pid;
4602
dfd4cc63 4603 if (ptid_get_lwp (ptid) == 0)
c0694254
PA
4604 {
4605 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4606 tgid. */
4607 lwp = find_lwp_pid (ptid);
dfd4cc63 4608 pid = ptid_get_pid (lwp->ptid);
c0694254
PA
4609 }
4610 else
4611 {
4612 /* A (pid,lwpid,0) ptid. */
dfd4cc63 4613 pid = ptid_get_pid (ptid);
c0694254
PA
4614 }
4615
4616 inf = find_inferior_pid (pid);
4617 gdb_assert (inf != NULL);
4618 return inf->aspace;
4619}
4620
dc146f7c
VP
4621/* Return the cached value of the processor core for thread PTID. */
4622
70221824 4623static int
dc146f7c
VP
4624linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
4625{
4626 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 4627
dc146f7c
VP
4628 if (info)
4629 return info->core;
4630 return -1;
4631}
4632
f973ed9c
DJ
4633void
4634linux_nat_add_target (struct target_ops *t)
4635{
f973ed9c
DJ
4636 /* Save the provided single-threaded target. We save this in a separate
4637 variable because another target we've inherited from (e.g. inf-ptrace)
4638 may have saved a pointer to T; we want to use it for the final
4639 process stratum target. */
4640 linux_ops_saved = *t;
4641 linux_ops = &linux_ops_saved;
4642
4643 /* Override some methods for multithreading. */
b84876c2 4644 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
4645 t->to_attach = linux_nat_attach;
4646 t->to_detach = linux_nat_detach;
4647 t->to_resume = linux_nat_resume;
4648 t->to_wait = linux_nat_wait;
2455069d 4649 t->to_pass_signals = linux_nat_pass_signals;
f973ed9c
DJ
4650 t->to_xfer_partial = linux_nat_xfer_partial;
4651 t->to_kill = linux_nat_kill;
4652 t->to_mourn_inferior = linux_nat_mourn_inferior;
4653 t->to_thread_alive = linux_nat_thread_alive;
4654 t->to_pid_to_str = linux_nat_pid_to_str;
4694da01 4655 t->to_thread_name = linux_nat_thread_name;
f973ed9c 4656 t->to_has_thread_control = tc_schedlock;
c0694254 4657 t->to_thread_address_space = linux_nat_thread_address_space;
ebec9a0f
PA
4658 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
4659 t->to_stopped_data_address = linux_nat_stopped_data_address;
f973ed9c 4660
b84876c2
PA
4661 t->to_can_async_p = linux_nat_can_async_p;
4662 t->to_is_async_p = linux_nat_is_async_p;
9908b566 4663 t->to_supports_non_stop = linux_nat_supports_non_stop;
b84876c2 4664 t->to_async = linux_nat_async;
b84876c2
PA
4665 t->to_terminal_inferior = linux_nat_terminal_inferior;
4666 t->to_terminal_ours = linux_nat_terminal_ours;
6a3cb8e8
PA
4667
4668 super_close = t->to_close;
d90e17a7 4669 t->to_close = linux_nat_close;
b84876c2 4670
4c28f408
PA
4671 /* Methods for non-stop support. */
4672 t->to_stop = linux_nat_stop;
4673
d90e17a7
PA
4674 t->to_supports_multi_process = linux_nat_supports_multi_process;
4675
03583c20
UW
4676 t->to_supports_disable_randomization
4677 = linux_nat_supports_disable_randomization;
4678
dc146f7c
VP
4679 t->to_core_of_thread = linux_nat_core_of_thread;
4680
f973ed9c
DJ
4681 /* We don't change the stratum; this target will sit at
4682 process_stratum and thread_db will sit at thread_stratum. This
4683 is a little strange, since this is a multi-threaded-capable
4684 target, but we want to be on the stack below thread_db, and we
4685 also want to be used for single-threaded processes. */
4686
4687 add_target (t);
f973ed9c
DJ
4688}
4689
9f0bdab8
DJ
4690/* Register a method to call whenever a new thread is attached. */
4691void
7b50312a
PA
4692linux_nat_set_new_thread (struct target_ops *t,
4693 void (*new_thread) (struct lwp_info *))
9f0bdab8
DJ
4694{
4695 /* Save the pointer. We only support a single registered instance
4696 of the GNU/Linux native target, so we do not need to map this to
4697 T. */
4698 linux_nat_new_thread = new_thread;
4699}
4700
26cb8b7c
PA
4701/* See declaration in linux-nat.h. */
4702
4703void
4704linux_nat_set_new_fork (struct target_ops *t,
4705 linux_nat_new_fork_ftype *new_fork)
4706{
4707 /* Save the pointer. */
4708 linux_nat_new_fork = new_fork;
4709}
4710
4711/* See declaration in linux-nat.h. */
4712
4713void
4714linux_nat_set_forget_process (struct target_ops *t,
4715 linux_nat_forget_process_ftype *fn)
4716{
4717 /* Save the pointer. */
4718 linux_nat_forget_process_hook = fn;
4719}
4720
4721/* See declaration in linux-nat.h. */
4722
4723void
4724linux_nat_forget_process (pid_t pid)
4725{
4726 if (linux_nat_forget_process_hook != NULL)
4727 linux_nat_forget_process_hook (pid);
4728}
4729
5b009018
PA
4730/* Register a method that converts a siginfo object between the layout
4731 that ptrace returns, and the layout in the architecture of the
4732 inferior. */
4733void
4734linux_nat_set_siginfo_fixup (struct target_ops *t,
a5362b9a 4735 int (*siginfo_fixup) (siginfo_t *,
5b009018
PA
4736 gdb_byte *,
4737 int))
4738{
4739 /* Save the pointer. */
4740 linux_nat_siginfo_fixup = siginfo_fixup;
4741}
4742
7b50312a
PA
4743/* Register a method to call prior to resuming a thread. */
4744
4745void
4746linux_nat_set_prepare_to_resume (struct target_ops *t,
4747 void (*prepare_to_resume) (struct lwp_info *))
4748{
4749 /* Save the pointer. */
4750 linux_nat_prepare_to_resume = prepare_to_resume;
4751}
4752
f865ee35
JK
4753/* See linux-nat.h. */
4754
4755int
4756linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
9f0bdab8 4757{
da559b09 4758 int pid;
9f0bdab8 4759
dfd4cc63 4760 pid = ptid_get_lwp (ptid);
da559b09 4761 if (pid == 0)
dfd4cc63 4762 pid = ptid_get_pid (ptid);
f865ee35 4763
da559b09
JK
4764 errno = 0;
4765 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
4766 if (errno != 0)
4767 {
4768 memset (siginfo, 0, sizeof (*siginfo));
4769 return 0;
4770 }
f865ee35 4771 return 1;
9f0bdab8
DJ
4772}
4773
2c0b251b
PA
4774/* Provide a prototype to silence -Wmissing-prototypes. */
4775extern initialize_file_ftype _initialize_linux_nat;
4776
d6b0e80f
AC
4777void
4778_initialize_linux_nat (void)
4779{
ccce17b0
YQ
4780 add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
4781 &debug_linux_nat, _("\
b84876c2
PA
4782Set debugging of GNU/Linux lwp module."), _("\
4783Show debugging of GNU/Linux lwp module."), _("\
4784Enables printf debugging output."),
ccce17b0
YQ
4785 NULL,
4786 show_debug_linux_nat,
4787 &setdebuglist, &showdebuglist);
b84876c2 4788
b84876c2 4789 /* Save this mask as the default. */
d6b0e80f
AC
4790 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4791
7feb7d06
PA
4792 /* Install a SIGCHLD handler. */
4793 sigchld_action.sa_handler = sigchld_handler;
4794 sigemptyset (&sigchld_action.sa_mask);
4795 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
4796
4797 /* Make it the default. */
7feb7d06 4798 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
4799
4800 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4801 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
4802 sigdelset (&suspend_mask, SIGCHLD);
4803
7feb7d06 4804 sigemptyset (&blocked_mask);
8009206a
TT
4805
4806 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to
4807 support read-only process state. */
4808 linux_ptrace_set_additional_flags (PTRACE_O_TRACESYSGOOD
4809 | PTRACE_O_TRACEVFORKDONE
4810 | PTRACE_O_TRACEVFORK
4811 | PTRACE_O_TRACEFORK
4812 | PTRACE_O_TRACEEXEC);
d6b0e80f
AC
4813}
4814\f
4815
4816/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4817 the GNU/Linux Threads library and therefore doesn't really belong
4818 here. */
4819
4820/* Read variable NAME in the target and return its value if found.
4821 Otherwise return zero. It is assumed that the type of the variable
4822 is `int'. */
4823
4824static int
4825get_signo (const char *name)
4826{
3b7344d5 4827 struct bound_minimal_symbol ms;
d6b0e80f
AC
4828 int signo;
4829
4830 ms = lookup_minimal_symbol (name, NULL, NULL);
3b7344d5 4831 if (ms.minsym == NULL)
d6b0e80f
AC
4832 return 0;
4833
77e371c0 4834 if (target_read_memory (BMSYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
4835 sizeof (signo)) != 0)
4836 return 0;
4837
4838 return signo;
4839}
4840
4841/* Return the set of signals used by the threads library in *SET. */
4842
4843void
4844lin_thread_get_thread_signals (sigset_t *set)
4845{
4846 struct sigaction action;
4847 int restart, cancel;
4848
b84876c2 4849 sigemptyset (&blocked_mask);
d6b0e80f
AC
4850 sigemptyset (set);
4851
4852 restart = get_signo ("__pthread_sig_restart");
17fbb0bd
DJ
4853 cancel = get_signo ("__pthread_sig_cancel");
4854
4855 /* LinuxThreads normally uses the first two RT signals, but in some legacy
4856 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
4857 not provide any way for the debugger to query the signal numbers -
4858 fortunately they don't change! */
4859
d6b0e80f 4860 if (restart == 0)
17fbb0bd 4861 restart = __SIGRTMIN;
d6b0e80f 4862
d6b0e80f 4863 if (cancel == 0)
17fbb0bd 4864 cancel = __SIGRTMIN + 1;
d6b0e80f
AC
4865
4866 sigaddset (set, restart);
4867 sigaddset (set, cancel);
4868
4869 /* The GNU/Linux Threads library makes terminating threads send a
4870 special "cancel" signal instead of SIGCHLD. Make sure we catch
4871 those (to prevent them from terminating GDB itself, which is
4872 likely to be their default action) and treat them the same way as
4873 SIGCHLD. */
4874
4875 action.sa_handler = sigchld_handler;
4876 sigemptyset (&action.sa_mask);
58aecb61 4877 action.sa_flags = SA_RESTART;
d6b0e80f
AC
4878 sigaction (cancel, &action, NULL);
4879
4880 /* We block the "cancel" signal throughout this code ... */
4881 sigaddset (&blocked_mask, cancel);
4882 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
4883
4884 /* ... except during a sigsuspend. */
4885 sigdelset (&suspend_mask, cancel);
4886}