/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "target.h"
#include "gdb_string.h"
#include "gdb_wait.h"
#include "gdb_assert.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/param.h>		/* for MAXPATHLEN */
#include <sys/procfs.h>		/* for elf_gregset etc. */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include "gdbthread.h"		/* for struct thread_info etc. */
#include "gdb_stat.h"		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include "gdb_dirent.h"
#include "xml-support.h"
#include "terminal.h"
#include <sys/vfs.h>
#include "solib.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif /* HAVE_PERSONALITY */

/* This comment documents the high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid alone is not quite
good enough.  Prior to version 2.4, Linux could only wait for events
either in the main thread or in the secondary threads, but not in both
at once (2.4 added the __WALL flag).  So, if we used a blocking
waitpid, we might miss an event.  The solution is to use non-blocking
waitpid, together with sigsuspend.  First, we use non-blocking waitpid
to get an event in the main process, if any.  Second, we use
non-blocking waitpid with the __WCLONE flag to check for events in
cloned processes.  If nothing is found, we use sigsuspend to wait for
SIGCHLD.  When SIGCHLD arrives, it means something happened to a child
process -- and SIGCHLD will be delivered both for events in the main
debugged process and in cloned processes.  As soon as we know there's
an event, we get back to calling non-blocking waitpid with and without
__WCLONE.

Note that SIGCHLD should be blocked between waitpid and sigsuspend
calls, so that we don't miss a signal.  If SIGCHLD arrives in between,
while it's blocked, the signal becomes pending and sigsuspend
immediately notices it and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well as
on other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, failing to notify the event loop of a target event is bad.
OTOH, if we notify the event loop when there's no event from the
target, linux_nat_wait will detect that there's no real event to
report, and return an event of type TARGET_WAITKIND_IGNORE.  This is
mostly harmless, but it wastes time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of
another signal is not entirely significant; we just need a signal to be
delivered, so that we can intercept it.  SIGSTOP's advantage is that it
can not be blocked.  A disadvantage is that it is not a real-time
signal, so it can only be queued once; we do not keep track of other
sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But
we can't use them, because they have special behavior when the signal
is generated - not when it is delivered.  SIGCONT resumes the entire
thread group and SIGKILL kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the
thread we tkill'd.  But we never let the SIGSTOP be delivered; we
always intercept and cancel it (by PTRACE_CONT without passing
SIGSTOP).

We could use a real-time signal instead.  This would solve those
problems; we could use PTRACE_GETSIGINFO to locate the specific stop
signals sent by GDB.  But we would still have to have some support for
SIGSTOP, since PTRACE_ATTACH generates it, and there are races with
trying to find a signal that is not blocked.  */
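
/* As a concrete illustration of the self-pipe trick described above,
   here is a minimal, stand-alone sketch (hypothetical names; in this
   file the corresponding pieces are linux_nat_event_pipe,
   async_file_flush and async_file_mark, defined below).  EVENT_PIPE
   is created with pipe(), both ends set non-blocking, and the read
   end registered with the event loop:

     static int event_pipe[2];

     static void
     sketch_sigchld_handler (int signo)
     {
       int saved_errno = errno;

       while (write (event_pipe[1], "+", 1) == -1 && errno == EINTR)
         ;
       errno = saved_errno;
     }

   write is async-signal-safe, and a full pipe (EAGAIN) is harmless,
   since in that case the event loop has already been woken up.  The
   event loop select/poll's on event_pipe[0] together with stdin and
   other sources; when it becomes readable, the pipe is drained and
   the target's wait method is called to fetch the actual event.  */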

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* If the system headers did not provide the constants, hard-code the
   normal values.  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_SETOPTIONS	0x4200
#define PTRACE_GETEVENTMSG	0x4201

/* Options set using PTRACE_SETOPTIONS.  */
#define PTRACE_O_TRACESYSGOOD	0x00000001
#define PTRACE_O_TRACEFORK	0x00000002
#define PTRACE_O_TRACEVFORK	0x00000004
#define PTRACE_O_TRACECLONE	0x00000008
#define PTRACE_O_TRACEEXEC	0x00000010
#define PTRACE_O_TRACEVFORKDONE	0x00000020
#define PTRACE_O_TRACEEXIT	0x00000040

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK	1
#define PTRACE_EVENT_VFORK	2
#define PTRACE_EVENT_CLONE	3
#define PTRACE_EVENT_EXEC	4
#define PTRACE_EVENT_VFORK_DONE	5
#define PTRACE_EVENT_EXIT	6

#endif /* PTRACE_EVENT_FORK */

/* Unlike other extended result codes, WSTOPSIG (status) on
   PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
   instead SIGTRAP with bit 7 set.  */
#define SYSCALL_SIGTRAP (SIGTRAP | 0x80)

/* We can't always assume that this flag is available, but all systems
   with the ptrace event handlers also have __WALL, so it's safe to use
   here.  */
#ifndef __WALL
#define __WALL          0x40000000 /* Wait for any child.  */
#endif

#ifndef PTRACE_GETSIGINFO
# define PTRACE_GETSIGINFO    0x4202
# define PTRACE_SETSIGINFO    0x4203
#endif

/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (ptid_t);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (struct siginfo *,
				       gdb_byte *,
				       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static LONGEST (*super_xfer_partial) (struct target_ops *,
				      enum target_object,
				      const char *, gdb_byte *,
				      const gdb_byte *,
				      ULONGEST, LONGEST);

static int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}

static int debug_linux_nat_async = 0;
static void
show_debug_linux_nat_async (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux async lwp module is %s.\n"),
		    value);
}

static int disable_randomization = 1;

static void
show_disable_randomization (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
#ifdef HAVE_PERSONALITY
  fprintf_filtered (file, _("\
Disabling randomization of debuggee's virtual address space is %s.\n"),
		    value);
#else /* !HAVE_PERSONALITY */
  fputs_filtered (_("\
Disabling randomization of debuggee's virtual address space is unsupported on\n\
this platform.\n"), file);
#endif /* !HAVE_PERSONALITY */
}

static void
set_disable_randomization (char *args, int from_tty, struct cmd_list_element *c)
{
#ifndef HAVE_PERSONALITY
  error (_("\
Disabling randomization of debuggee's virtual address space is unsupported on\n\
this platform."));
#endif /* !HAVE_PERSONALITY */
}

static int linux_parent_pid;

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
   can not be used, 1 if it can.  */

static int linux_supports_tracefork_flag = -1;

/* This variable is a tri-state flag: -1 for unknown, 0 if
   PTRACE_O_TRACESYSGOOD can not be used, 1 if it can.  */

static int linux_supports_tracesysgood_flag = -1;

/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
   PTRACE_O_TRACEVFORKDONE.  */

static int linux_supports_tracevforkdone_flag = -1;

/* Async mode support.  */

/* Zero if the async mode, although enabled, is masked, which means
   linux_nat_wait should behave as if async mode was off.  */
static int linux_nat_async_mask_value = 1;

/* Stores the currently used ptrace() options.  */
static int current_ptrace_options = 0;

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in the
   event pipe, so that the select/poll in the event-loop realizes we
   have something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long as we
     end up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}

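/* Usage note for the two helpers above: async_file_mark is called
   whenever an event is left pending for later processing (for
   example, by the vfork handling in linux_child_follow_fork below),
   so that the event loop wakes up and calls back into this target;
   as seen above, it first discards any stale wake-ups via
   async_file_flush.  */
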
static void linux_nat_async (void (*callback)
			     (enum inferior_event_type event_type, void *context),
			     void *context);
static int linux_nat_async_mask (int mask);
static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp, void *data);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);

\f
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;
	*status = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

static void
linux_record_stopped_pid (int pid, int status)
{
  add_to_pid_list (&stopped_pids, pid, status);
}

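/* Illustrative usage of the helpers above (the actual callers live
   elsewhere in this file): when a wait status arrives for a child we
   are not yet ready to handle, record it, and pull it back out before
   waiting on that child again.

     int status;

     if (pull_pid_from_list (&stopped_pids, pid, &status))
       handle_saved_status (pid, status);
     else
       linux_record_stopped_pid (pid, status_from_waitpid);

   handle_saved_status and status_from_waitpid are hypothetical names
   standing in for whatever the caller does with the cached status and
   for a status obtained from an earlier waitpid call.  */
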
\f
/* A helper function for linux_test_for_tracefork, called after fork ().  */

static void
linux_tracefork_child (void)
{
  int ret;

  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}

/* Wrapper function for waitpid which handles EINTR.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret;

  do
    {
      ret = waitpid (pid, status, flags);
    }
  while (ret == -1 && errno == EINTR);

  return ret;
}

/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.

   First, we try to enable fork tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.

   However, if it succeeds, we don't know for sure that the feature is
   available; old versions of PTRACE_SETOPTIONS ignored unknown options.  We
   create a child process, attach to it, use PTRACE_SETOPTIONS to enable
   fork tracing, and let it fork.  If the process exits, we assume that we
   can't use TRACEFORK; if we get the fork notification, and we can extract
   the new child's PID, then we assume that we can.  */

static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      restore_child_signals_mask (&prev_mask);
      return;
    }

  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning (_("linux_test_for_tracefork: failed to kill child"));
	  restore_child_signals_mask (&prev_mask);
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning (_("linux_test_for_tracefork: failed to wait for killed child"));
      else if (!WIFSIGNALED (status))
	warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
		   "killed child"), status);

      restore_child_signals_mask (&prev_mask);
      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
		PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  linux_supports_tracefork_flag = 1;
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning (_("linux_test_for_tracefork: failed to kill second child"));
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
	       "(%d, status 0x%x)"), ret, status);

  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);

  restore_child_signals_mask (&prev_mask);
}

/* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.

   We try to enable syscall tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.  */

static void
linux_test_for_tracesysgood (int original_pid)
{
  int ret;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  linux_supports_tracesysgood_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
  if (ret != 0)
    goto out;

  linux_supports_tracesysgood_flag = 1;
out:
  restore_child_signals_mask (&prev_mask);
}

/* Determine whether the PTRACE_O_TRACESYSGOOD option is available.
   This function also sets linux_supports_tracesysgood_flag.  */

static int
linux_supports_tracesysgood (int pid)
{
  if (linux_supports_tracesysgood_flag == -1)
    linux_test_for_tracesysgood (pid);
  return linux_supports_tracesysgood_flag;
}

/* Return non-zero iff we have tracefork functionality available.
   This function also sets linux_supports_tracefork_flag.  */

static int
linux_supports_tracefork (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracefork_flag;
}

static int
linux_supports_tracevforkdone (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracevforkdone_flag;
}

static void
linux_enable_tracesysgood (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (linux_supports_tracesysgood (pid) == 0)
    return;

  current_ptrace_options |= PTRACE_O_TRACESYSGOOD;

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}

\f
void
linux_enable_event_reporting (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (! linux_supports_tracefork (pid))
    return;

  current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
    | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;

  if (linux_supports_tracevforkdone (pid))
    current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;

  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
     read-only process state.  */

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}

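/* Once the options above are enabled, extended events arrive as stops
   whose wait status encodes the event number in the high bits, exactly
   as probed in linux_test_for_tracefork above.  In rough outline
   (illustrative only):

     if (WIFSTOPPED (status) && status >> 16 == PTRACE_EVENT_FORK)
       {
	 unsigned long new_pid;

	 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
	 ... new_pid is the pid of the child created by the fork ...
       }
   */
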
static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  check_for_thread_db ();
  linux_enable_tracesysgood (pid_to_ptid (pid));
}

static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  check_for_thread_db ();
  linux_enable_tracesysgood (ptid);
}

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child)
{
  sigset_t prev_mask;
  int has_vforked;
  int parent_pid, child_pid;

  block_child_signals (&prev_mask);

  has_vforked = (inferior_thread ()->pending_follow.kind
		 == TARGET_WAITKIND_VFORKED);
  parent_pid = ptid_get_lwp (inferior_ptid);
  if (parent_pid == 0)
    parent_pid = ptid_get_pid (inferior_ptid);
  child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);

  if (!detach_fork)
    linux_enable_event_reporting (pid_to_ptid (child_pid));

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && (!target_is_async_p () || sync_execution)
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
	 child execs or exits.  If we don't let the child run, then
	 the parent stays blocked.  If we're telling the parent to run
	 in the foreground, the user will not be able to ctrl-c to get
	 back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while \n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      return 1;
    }

  if (! follow_child)
    {
      struct lwp_info *child_lp = NULL;

      /* We're already attached to the parent, by default.  */

      /* Detach the new forked process?  */
      if (detach_fork)
	{
	  /* Before detaching from the child, remove all breakpoints
	     from it.  If we forked, then this has already been taken
	     care of by infrun.c.  If we vforked however, any
	     breakpoint inserted in the parent is visible in the
	     child, even those added while stopped in a vfork
	     catchpoint.  This will remove the breakpoints from the
	     parent also, but they'll be reinserted below.  */
	  if (has_vforked)
	    {
	      /* Keep the breakpoint list in sync.  */
	      remove_breakpoints_pid (GET_PID (inferior_ptid));
	    }

	  if (info_verbose || debug_linux_nat)
	    {
	      target_terminal_ours ();
	      fprintf_filtered (gdb_stdlog,
				"Detaching after fork from child process %d.\n",
				child_pid);
	    }

	  ptrace (PTRACE_DETACH, child_pid, 0, 0);
	}
      else
	{
	  struct inferior *parent_inf, *child_inf;
	  struct cleanup *old_chain;

	  /* Add process to GDB's tables.  */
	  child_inf = add_inferior (child_pid);

	  parent_inf = current_inferior ();
	  child_inf->attach_flag = parent_inf->attach_flag;
	  copy_terminal_info (child_inf, parent_inf);

	  old_chain = save_inferior_ptid ();
	  save_current_program_space ();

	  inferior_ptid = ptid_build (child_pid, child_pid, 0);
	  add_thread (inferior_ptid);
	  child_lp = add_lwp (inferior_ptid);
	  child_lp->stopped = 1;
	  child_lp->resumed = 1;

	  /* If this is a vfork child, then the address-space is
	     shared with the parent.  */
	  if (has_vforked)
	    {
	      child_inf->pspace = parent_inf->pspace;
	      child_inf->aspace = parent_inf->aspace;

	      /* The parent will be frozen until the child is done
		 with the shared region.  Keep track of the
		 parent.  */
	      child_inf->vfork_parent = parent_inf;
	      child_inf->pending_detach = 0;
	      parent_inf->vfork_child = child_inf;
	      parent_inf->pending_detach = 0;
	    }
	  else
	    {
	      child_inf->aspace = new_address_space ();
	      child_inf->pspace = add_program_space (child_inf->aspace);
	      child_inf->removable = 1;
	      set_current_program_space (child_inf->pspace);
	      clone_program_space (child_inf->pspace, parent_inf->pspace);

	      /* Let the shared library layer (solib-svr4) learn about
		 this new process, relocate the cloned exec, pull in
		 shared libraries, and install the solib event
		 breakpoint.  If a "cloned-VM" event was propagated
		 better throughout the core, this wouldn't be
		 required.  */
	      solib_create_inferior_hook ();
	    }

	  /* Let the thread_db layer learn about this new process.  */
	  check_for_thread_db ();

	  do_cleanups (old_chain);
	}

      if (has_vforked)
	{
	  struct lwp_info *lp;
	  struct inferior *parent_inf;

	  parent_inf = current_inferior ();

	  /* If we detached from the child, then we have to be careful
	     to not insert breakpoints in the parent until the child
	     is done with the shared memory region.  However, if we're
	     staying attached to the child, then we can and should
	     insert breakpoints, so that we can debug it.  A
	     subsequent child exec or exit is enough to know when the
	     child stops using the parent's address space.  */
	  parent_inf->waiting_for_vfork_done = detach_fork;

	  lp = find_lwp_pid (pid_to_ptid (parent_pid));
	  gdb_assert (linux_supports_tracefork_flag >= 0);
	  if (linux_supports_tracevforkdone (0))
	    {
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LCFF: waiting for VFORK_DONE on %d\n",
				    parent_pid);

	      lp->stopped = 1;
	      lp->resumed = 1;

	      /* We'll handle the VFORK_DONE event like any other
		 event, in target_wait.  */
	    }
	  else
	    {
	      /* We can't insert breakpoints until the child has
		 finished with the shared memory region.  We need to
		 wait until that happens.  Ideal would be to just
		 call:
		 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
		 - waitpid (parent_pid, &status, __WALL);
		 However, most architectures can't handle a syscall
		 being traced on the way out if it wasn't traced on
		 the way in.

		 We might also think to loop, continuing the child
		 until it exits or gets a SIGTRAP.  One problem is
		 that the child might call ptrace with PTRACE_TRACEME.

		 There's no simple and reliable way to figure out when
		 the vforked child will be done with its copy of the
		 shared memory.  We could step it out of the syscall,
		 two instructions, let it go, and then single-step the
		 parent once.  When we have hardware single-step, this
		 would work; with software single-step it could still
		 be made to work but we'd have to be able to insert
		 single-step breakpoints in the child, and we'd have
		 to insert -just- the single-step breakpoint in the
		 parent.  Very awkward.

		 In the end, the best we can do is to make sure it
		 runs for a little while.  Hopefully it will be out of
		 range of any breakpoints we reinsert.  Usually this
		 is only the single-step breakpoint at vfork's return
		 point.  */

	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LCFF: no VFORK_DONE support, sleeping a bit\n");

	      usleep (10000);

	      /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
		 and leave it pending.  The next linux_nat_resume call
		 will notice a pending event, and bypass actually
		 resuming the inferior.  */
	      lp->status = 0;
	      lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
	      lp->stopped = 0;
	      lp->resumed = 1;

	      /* If we're in async mode, need to tell the event loop
		 there's something here to process.  */
	      if (target_can_async_p ())
		async_file_mark ();
	    }
	}
    }
  else
    {
      struct thread_info *tp;
      struct inferior *parent_inf, *child_inf;
      struct lwp_info *lp;
      struct program_space *parent_pspace;

      if (info_verbose || debug_linux_nat)
	{
	  target_terminal_ours ();
	  if (has_vforked)
	    fprintf_filtered (gdb_stdlog, _("\
Attaching after process %d vfork to child process %d.\n"),
			      parent_pid, child_pid);
	  else
	    fprintf_filtered (gdb_stdlog, _("\
Attaching after process %d fork to child process %d.\n"),
			      parent_pid, child_pid);
	}

      /* Add the new inferior first, so that the target_detach below
	 doesn't unpush the target.  */

      child_inf = add_inferior (child_pid);

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      /* If we're vforking, we want to hold on to the parent until the
	 child exits or execs.  At child exec or exit time we can
	 remove the old breakpoints from the parent and detach or
	 resume debugging it.  Otherwise, detach the parent now; we'll
	 want to reuse its program/address spaces, but we can't set
	 them to the child before removing breakpoints from the
	 parent, otherwise, the breakpoints module could decide to
	 remove breakpoints from the wrong process (since they'd be
	 assigned to the same address space).  */

      if (has_vforked)
	{
	  gdb_assert (child_inf->vfork_parent == NULL);
	  gdb_assert (parent_inf->vfork_child == NULL);
	  child_inf->vfork_parent = parent_inf;
	  child_inf->pending_detach = 0;
	  parent_inf->vfork_child = child_inf;
	  parent_inf->pending_detach = detach_fork;
	  parent_inf->waiting_for_vfork_done = 0;
	}
      else if (detach_fork)
	target_detach (NULL, 0);

      /* Note that the detach above makes PARENT_INF dangling.  */

      /* Add the child thread to the appropriate lists, and switch to
	 this new thread, before cloning the program space, and
	 informing the solib layer about this new process.  */

      inferior_ptid = ptid_build (child_pid, child_pid, 0);
      add_thread (inferior_ptid);
      lp = add_lwp (inferior_ptid);
      lp->stopped = 1;
      lp->resumed = 1;

      /* If this is a vfork child, then the address-space is shared
	 with the parent.  If we detached from the parent, then we can
	 reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
	{
	  child_inf->pspace = parent_pspace;
	  child_inf->aspace = child_inf->pspace->aspace;
	}
      else
	{
	  child_inf->aspace = new_address_space ();
	  child_inf->pspace = add_program_space (child_inf->aspace);
	  child_inf->removable = 1;
	  set_current_program_space (child_inf->pspace);
	  clone_program_space (child_inf->pspace, parent_pspace);

	  /* Let the shared library layer (solib-svr4) learn about
	     this new process, relocate the cloned exec, pull in
	     shared libraries, and install the solib event breakpoint.
	     If a "cloned-VM" event was propagated better throughout
	     the core, this wouldn't be required.  */
	  solib_create_inferior_hook ();
	}

      /* Let the thread_db layer learn about this new process.  */
      check_for_thread_db ();
    }

  restore_child_signals_mask (&prev_mask);
  return 0;
}

\f
static void
linux_child_insert_fork_catchpoint (int pid)
{
  if (! linux_supports_tracefork (pid))
    error (_("Your system does not support fork catchpoints."));
}

static void
linux_child_insert_vfork_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support vfork catchpoints."));
}

static void
linux_child_insert_exec_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support exec catchpoints."));
}

static int
linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
				    int table_size, int *table)
{
  if (! linux_supports_tracesysgood (pid))
    error (_("Your system does not support syscall catchpoints."));
  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `table' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}

/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     When debugged, a multi-threaded process that spawns a lot of
     threads will run out of processes, even if the threads exit,
     because the "zombies" stay around.  */

/* List of known LWPs.  */
struct lwp_info *lwp_list;
\f

/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}
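
/* The usual pattern for the pair above, as used throughout this file
   (illustrative):

     sigset_t prev_mask;

     block_child_signals (&prev_mask);
     ... ptrace/waitpid work that must not race with SIGCHLD ...
     restore_child_signals_mask (&prev_mask);  */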
\f

/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (int pid);
static int cancel_breakpoint (struct lwp_info *lp);

\f
/* Convert wait status STATUS to a string.  Used for printing debug
   messages only.  */

static char *
status_to_str (int status)
{
  static char buf[64];

  if (WIFSTOPPED (status))
    {
      if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
	snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
		  strsignal (SIGTRAP));
      else
	snprintf (buf, sizeof (buf), "%s (stopped)",
		  strsignal (WSTOPSIG (status)));
    }
  else if (WIFSIGNALED (status))
    snprintf (buf, sizeof (buf), "%s (terminated)",
	      strsignal (WTERMSIG (status)));
  else
    snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));

  return buf;
}

/* Initialize the list of LWPs.  Note that this module, contrary to
   what GDB's generic threads layer does for its thread list,
   re-initializes the LWP lists whenever we mourn or detach (which
   doesn't involve mourning) the inferior.  */

static void
init_lwp_list (void)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;
      xfree (lp);
    }

  lwp_list = NULL;
}

/* Remove all LWPs belonging to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  struct lwp_info *lp, *lpprev, *lpnext;

  lpprev = NULL;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_get_pid (lp->ptid) == pid)
	{
	  if (lp == lwp_list)
	    lwp_list = lp->next;
	  else
	    lpprev->next = lp->next;

	  xfree (lp);
	}
      else
	lpprev = lp;
    }
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;
  struct lwp_info *lp;

  for (lp = lwp_list; lp; lp = lp->next)
    if (ptid_get_pid (lp->ptid) == pid)
      count++;

  return count;
}

/* Add the LWP specified by PTID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be stopped
   (with an exception for the very first LWP).  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (is_lwp (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  memset (lp, 0, sizeof (struct lwp_info));

  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;

  lp->next = lwp_list;
  lwp_list = lp;

  if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL)
    linux_nat_new_thread (ptid);

  return lp;
}

/* Remove the LWP specified by PTID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  struct lwp_info *lp, *lpprev;

  lpprev = NULL;

  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
    if (ptid_equal (lp->ptid, ptid))
      break;

  if (!lp)
    return;

  if (lpprev)
    lpprev->next = lp->next;
  else
    lwp_list = lp->next;

  xfree (lp);
}

/* Return a pointer to the structure describing the LWP corresponding
   to PTID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwp;

  if (is_lwp (ptid))
    lwp = GET_LWP (ptid);
  else
    lwp = GET_PID (ptid);

  for (lp = lwp_list; lp; lp = lp->next)
    if (lwp == GET_LWP (lp->ptid))
      return lp;

  return NULL;
}

/* Returns true if PTID matches filter FILTER.  FILTER can be the wild
   card MINUS_ONE_PTID (all ptids match it); it can be a ptid representing
   a process (ptid_is_pid returns true), in which case all LWPs of that
   given process match and LWPs of other processes do not; or it can
   represent a specific thread, in which case only that thread matches.
   PTID must represent an LWP, it can never be a wild card.  */

static int
ptid_match (ptid_t ptid, ptid_t filter)
{
  /* Since both parameters have the same type, prevent easy mistakes
     from happening.  */
  gdb_assert (!ptid_equal (ptid, minus_one_ptid)
	      && !ptid_equal (ptid, null_ptid));

  if (ptid_equal (filter, minus_one_ptid))
    return 1;
  if (ptid_is_pid (filter)
      && ptid_get_pid (ptid) == ptid_get_pid (filter))
    return 1;
  else if (ptid_equal (ptid, filter))
    return 1;

  return 0;
}
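
/* For example (illustrative): with FILTER == minus_one_ptid every LWP
   matches; with FILTER == pid_to_ptid (1234), the LWP ptid (1234,
   5678, 0) matches while (4321, 8765, 0) does not; with a fully
   specified FILTER only that exact thread matches.  */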

/* Call CALLBACK with its second argument set to DATA for every LWP in
   the list.  If CALLBACK returns 1 for a particular LWP, return a
   pointer to the structure describing that LWP immediately.
   Otherwise return NULL.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   int (*callback) (struct lwp_info *, void *),
		   void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_match (lp->ptid, filter))
	{
	  if ((*callback) (lp, data))
	    return lp;
	}
    }

  return NULL;
}

/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  purge_lwp_list (GET_PID (inferior_ptid));

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_ptid (lp->ptid);

  if (th)
    {
      if (print_thread_events)
	printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      delete_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}

/* Return an lwp's tgid, found in `/proc/PID/status'.  */

int
linux_proc_get_tgid (int lwpid)
{
  FILE *status_file;
  char buf[100];
  int tgid = -1;

  snprintf (buf, sizeof (buf), "/proc/%d/status", (int) lwpid);
  status_file = fopen (buf, "r");
  if (status_file != NULL)
    {
      while (fgets (buf, sizeof (buf), status_file))
	{
	  if (strncmp (buf, "Tgid:", 5) == 0)
	    {
	      tgid = strtoul (buf + strlen ("Tgid:"), NULL, 10);
	      break;
	    }
	}

      fclose (status_file);
    }

  return tgid;
}

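/* For reference, the line parsed above looks like "Tgid:\t1234" in
   /proc/PID/status; for the thread group leader the Tgid equals the
   PID, while for secondary threads it is the PID of the group
   leader.  */
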
a0ef4274
DJ
1366/* Detect `T (stopped)' in `/proc/PID/status'.
1367 Other states including `T (tracing stop)' are reported as false. */
1368
1369static int
1370pid_is_stopped (pid_t pid)
1371{
1372 FILE *status_file;
1373 char buf[100];
1374 int retval = 0;
1375
1376 snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
1377 status_file = fopen (buf, "r");
1378 if (status_file != NULL)
1379 {
1380 int have_state = 0;
1381
1382 while (fgets (buf, sizeof (buf), status_file))
1383 {
1384 if (strncmp (buf, "State:", 6) == 0)
1385 {
1386 have_state = 1;
1387 break;
1388 }
1389 }
1390 if (have_state && strstr (buf, "T (stopped)") != NULL)
1391 retval = 1;
1392 fclose (status_file);
1393 }
1394 return retval;
1395}
1396
1397/* Wait for the LWP specified by LP, which we have just attached to.
1398 Returns a wait status for that LWP, to cache. */
1399
1400static int
1401linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
1402 int *signalled)
1403{
1404 pid_t new_pid, pid = GET_LWP (ptid);
1405 int status;
1406
1407 if (pid_is_stopped (pid))
1408 {
1409 if (debug_linux_nat)
1410 fprintf_unfiltered (gdb_stdlog,
1411 "LNPAW: Attaching to a stopped process\n");
1412
1413 /* The process is definitely stopped. It is in a job control
1414 stop, unless the kernel predates the TASK_STOPPED /
1415 TASK_TRACED distinction, in which case it might be in a
1416 ptrace stop. Make sure it is in a ptrace stop; from there we
1417 can kill it, signal it, et cetera.
1418
1419 First make sure there is a pending SIGSTOP. Since we are
1420 already attached, the process can not transition from stopped
1421 to running without a PTRACE_CONT; so we know this signal will
1422 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1423 probably already in the queue (unless this kernel is old
1424 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1425 is not an RT signal, it can only be queued once. */
1426 kill_lwp (pid, SIGSTOP);
1427
1428 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1429 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1430 ptrace (PTRACE_CONT, pid, 0, 0);
1431 }
1432
1433 /* Make sure the initial process is stopped. The user-level threads
1434 layer might want to poke around in the inferior, and that won't
1435 work if things haven't stabilized yet. */
1436 new_pid = my_waitpid (pid, &status, 0);
1437 if (new_pid == -1 && errno == ECHILD)
1438 {
1439 if (first)
1440 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1441
1442 /* Try again with __WCLONE to check cloned processes. */
1443 new_pid = my_waitpid (pid, &status, __WCLONE);
1444 *cloned = 1;
1445 }
1446
dacc9cb2
PP
1447 gdb_assert (pid == new_pid);
1448
1449 if (!WIFSTOPPED (status))
1450 {
1451 /* The pid we tried to attach has apparently just exited. */
1452 if (debug_linux_nat)
1453 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
1454 pid, status_to_str (status));
1455 return status;
1456 }
a0ef4274
DJ
1457
1458 if (WSTOPSIG (status) != SIGSTOP)
1459 {
1460 *signalled = 1;
1461 if (debug_linux_nat)
1462 fprintf_unfiltered (gdb_stdlog,
1463 "LNPAW: Received %s after attaching\n",
1464 status_to_str (status));
1465 }
1466
1467 return status;
1468}
1469
1470/* Attach to the LWP specified by PID. Return 0 if successful or -1
1471 if the new LWP could not be attached. */
d6b0e80f 1472
9ee57c33 1473int
93815fbf 1474lin_lwp_attach_lwp (ptid_t ptid)
d6b0e80f 1475{
9ee57c33 1476 struct lwp_info *lp;
7feb7d06 1477 sigset_t prev_mask;
d6b0e80f
AC
1478
1479 gdb_assert (is_lwp (ptid));
1480
7feb7d06 1481 block_child_signals (&prev_mask);
d6b0e80f 1482
9ee57c33 1483 lp = find_lwp_pid (ptid);
d6b0e80f
AC
1484
1485 /* We assume that we're already attached to any LWP that has an id
1486 equal to the overall process id, and to any LWP that is already
1487 in our list of LWPs. If we're not seeing exit events from threads
1488 and we've had PID wraparound since we last tried to stop all threads,
1489 this assumption might be wrong; fortunately, this is very unlikely
1490 to happen. */
9ee57c33 1491 if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
d6b0e80f 1492 {
a0ef4274 1493 int status, cloned = 0, signalled = 0;
d6b0e80f
AC
1494
1495 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
9ee57c33
DJ
1496 {
1497 /* If we fail to attach to the thread, issue a warning,
1498 but continue. One way this can happen is if thread
e9efe249 1499 creation is interrupted; as of Linux kernel 2.6.19, a
9ee57c33
DJ
1500 bug may place threads in the thread list and then fail
1501 to create them. */
1502 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1503 safe_strerror (errno));
7feb7d06 1504 restore_child_signals_mask (&prev_mask);
9ee57c33
DJ
1505 return -1;
1506 }
1507
d6b0e80f
AC
1508 if (debug_linux_nat)
1509 fprintf_unfiltered (gdb_stdlog,
1510 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1511 target_pid_to_str (ptid));
1512
a0ef4274 1513 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
dacc9cb2
PP
1514 if (!WIFSTOPPED (status))
1515 return -1;
1516
a0ef4274
DJ
1517 lp = add_lwp (ptid);
1518 lp->stopped = 1;
1519 lp->cloned = cloned;
1520 lp->signalled = signalled;
1521 if (WSTOPSIG (status) != SIGSTOP)
d6b0e80f 1522 {
a0ef4274
DJ
1523 lp->resumed = 1;
1524 lp->status = status;
d6b0e80f
AC
1525 }
1526
a0ef4274 1527 target_post_attach (GET_LWP (lp->ptid));
d6b0e80f
AC
1528
1529 if (debug_linux_nat)
1530 {
1531 fprintf_unfiltered (gdb_stdlog,
1532 "LLAL: waitpid %s received %s\n",
1533 target_pid_to_str (ptid),
1534 status_to_str (status));
1535 }
1536 }
1537 else
1538 {
1539 /* We assume that the LWP representing the original process is
1540 already stopped. Mark it as stopped in the data structure
155bd5d1
AC
1541 that the GNU/linux ptrace layer uses to keep track of
1542 threads. Note that this won't have already been done since
1543 the main thread will have, we assume, been stopped by an
1544 attach from a different layer. */
9ee57c33
DJ
1545 if (lp == NULL)
1546 lp = add_lwp (ptid);
d6b0e80f
AC
1547 lp->stopped = 1;
1548 }
9ee57c33 1549
7feb7d06 1550 restore_child_signals_mask (&prev_mask);
9ee57c33 1551 return 0;
d6b0e80f
AC
1552}
1553
b84876c2 1554static void
136d6dae
VP
1555linux_nat_create_inferior (struct target_ops *ops,
1556 char *exec_file, char *allargs, char **env,
b84876c2
PA
1557 int from_tty)
1558{
10568435
JK
1559#ifdef HAVE_PERSONALITY
1560 int personality_orig = 0, personality_set = 0;
1561#endif /* HAVE_PERSONALITY */
b84876c2
PA
1562
1563 /* The fork_child mechanism is synchronous and calls target_wait, so
1564 we have to mask the async mode. */
1565
10568435
JK
1566#ifdef HAVE_PERSONALITY
1567 if (disable_randomization)
1568 {
1569 errno = 0;
1570 personality_orig = personality (0xffffffff);
1571 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1572 {
1573 personality_set = 1;
1574 personality (personality_orig | ADDR_NO_RANDOMIZE);
1575 }
1576 if (errno != 0 || (personality_set
1577 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1578 warning (_("Error disabling address space randomization: %s"),
1579 safe_strerror (errno));
1580 }
1581#endif /* HAVE_PERSONALITY */
1582
136d6dae 1583 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
b84876c2 1584
10568435
JK
1585#ifdef HAVE_PERSONALITY
1586 if (personality_set)
1587 {
1588 errno = 0;
1589 personality (personality_orig);
1590 if (errno != 0)
1591 warning (_("Error restoring address space randomization: %s"),
1592 safe_strerror (errno));
1593 }
1594#endif /* HAVE_PERSONALITY */
b84876c2
PA
1595}
1596
d6b0e80f 1597static void
136d6dae 1598linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f
AC
1599{
1600 struct lwp_info *lp;
d6b0e80f 1601 int status;
af990527 1602 ptid_t ptid;
d6b0e80f 1603
136d6dae 1604 linux_ops->to_attach (ops, args, from_tty);
d6b0e80f 1605
af990527
PA
1606 /* The ptrace base target adds the main thread with (pid,0,0)
1607 format. Decorate it with lwp info. */
1608 ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1609 thread_change_ptid (inferior_ptid, ptid);
1610
9f0bdab8 1611 /* Add the initial process as the first LWP to the list. */
af990527 1612 lp = add_lwp (ptid);
a0ef4274
DJ
1613
1614 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1615 &lp->signalled);
dacc9cb2
PP
1616 if (!WIFSTOPPED (status))
1617 {
1618 if (WIFEXITED (status))
1619 {
1620 int exit_code = WEXITSTATUS (status);
1621
1622 target_terminal_ours ();
1623 target_mourn_inferior ();
1624 if (exit_code == 0)
1625 error (_("Unable to attach: program exited normally."));
1626 else
1627 error (_("Unable to attach: program exited with code %d."),
1628 exit_code);
1629 }
1630 else if (WIFSIGNALED (status))
1631 {
1632 enum target_signal signo;
1633
1634 target_terminal_ours ();
1635 target_mourn_inferior ();
1636
1637 signo = target_signal_from_host (WTERMSIG (status));
1638 error (_("Unable to attach: program terminated with signal "
1639 "%s, %s."),
1640 target_signal_to_name (signo),
1641 target_signal_to_string (signo));
1642 }
1643
1644 internal_error (__FILE__, __LINE__,
1645 _("unexpected status %d for PID %ld"),
1646 status, (long) GET_LWP (ptid));
1647 }
1648
a0ef4274 1649 lp->stopped = 1;
9f0bdab8 1650
a0ef4274 1651 /* Save the wait status to report later. */
d6b0e80f 1652 lp->resumed = 1;
a0ef4274
DJ
1653 if (debug_linux_nat)
1654 fprintf_unfiltered (gdb_stdlog,
1655 "LNA: waitpid %ld, saving status %s\n",
1656 (long) GET_PID (lp->ptid), status_to_str (status));
710151dd 1657
7feb7d06
PA
1658 lp->status = status;
1659
1660 if (target_can_async_p ())
1661 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1662}
1663
a0ef4274
DJ
1664/* Get pending status of LP. */
1665static int
1666get_pending_status (struct lwp_info *lp, int *status)
1667{
ca2163eb
PA
1668 enum target_signal signo = TARGET_SIGNAL_0;
1669
1670 /* If we paused threads momentarily, we may have stored pending
1671 events in lp->status or lp->waitstatus (see stop_wait_callback),
1672 and GDB core hasn't seen any signal for those threads.
1673 Otherwise, the last signal reported to the core is found in the
1674 thread object's stop_signal.
1675
1676 There's a corner case that isn't handled here at present. Only
1677 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1678 stop_signal make sense as a real signal to pass to the inferior.
1679 Some catchpoint related events, like
1680 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1681 to TARGET_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1682 those traps are debug API (ptrace in our case) related and
1683 induced; the inferior wouldn't see them if it wasn't being
1684 traced. Hence, we should never pass them to the inferior, even
1685 when set to pass state. Since this corner case isn't handled by
1686 infrun.c when proceeding with a signal, for consistency, neither
1687 do we handle it here (or elsewhere in the file we check for
1688 signal pass state). Normally SIGTRAP isn't set to pass state, so
1689 this is really a corner case. */
1690
1691 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1692 signo = TARGET_SIGNAL_0; /* a pending ptrace event, not a real signal. */
1693 else if (lp->status)
1694 signo = target_signal_from_host (WSTOPSIG (lp->status));
1695 else if (non_stop && !is_executing (lp->ptid))
1696 {
1697 struct thread_info *tp = find_thread_ptid (lp->ptid);
1698 signo = tp->stop_signal;
1699 }
1700 else if (!non_stop)
a0ef4274 1701 {
ca2163eb
PA
1702 struct target_waitstatus last;
1703 ptid_t last_ptid;
4c28f408 1704
ca2163eb 1705 get_last_target_status (&last_ptid, &last);
4c28f408 1706
ca2163eb
PA
1707 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1708 {
e09875d4 1709 struct thread_info *tp = find_thread_ptid (lp->ptid);
2020b7ab 1710 signo = tp->stop_signal;
4c28f408 1711 }
ca2163eb 1712 }
4c28f408 1713
ca2163eb 1714 *status = 0;
4c28f408 1715
ca2163eb
PA
1716 if (signo == TARGET_SIGNAL_0)
1717 {
1718 if (debug_linux_nat)
1719 fprintf_unfiltered (gdb_stdlog,
1720 "GPT: lwp %s has no pending signal\n",
1721 target_pid_to_str (lp->ptid));
1722 }
1723 else if (!signal_pass_state (signo))
1724 {
1725 if (debug_linux_nat)
1726 fprintf_unfiltered (gdb_stdlog, "\
1727GPT: lwp %s had signal %s, but it is in no pass state\n",
1728 target_pid_to_str (lp->ptid),
1729 target_signal_to_string (signo));
a0ef4274 1730 }
a0ef4274 1731 else
4c28f408 1732 {
ca2163eb
PA
1733 *status = W_STOPCODE (target_signal_to_host (signo));
1734
1735 if (debug_linux_nat)
1736 fprintf_unfiltered (gdb_stdlog,
1737 "GPT: lwp %s has pending signal %s\n",
1738 target_pid_to_str (lp->ptid),
1739 target_signal_to_string (signo));
4c28f408 1740 }
a0ef4274
DJ
1741
1742 return 0;
1743}
1744
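/* Illustrative sketch, not part of the original file: how
   get_pending_status re-encodes a signal as a waitpid-style status.
   W_STOPCODE (sig) packs SIG so that WIFSTOPPED is true and WSTOPSIG
   recovers SIG, e.g. W_STOPCODE (SIGINT) == (SIGINT << 8) | 0x7f.  */
static void
example_wait_status_roundtrip (int sig)
{
  int status = W_STOPCODE (sig);

  gdb_assert (WIFSTOPPED (status));
  gdb_assert (WSTOPSIG (status) == sig);
}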
d6b0e80f
AC
1745static int
1746detach_callback (struct lwp_info *lp, void *data)
1747{
1748 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1749
1750 if (debug_linux_nat && lp->status)
1751 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1752 strsignal (WSTOPSIG (lp->status)),
1753 target_pid_to_str (lp->ptid));
1754
a0ef4274
DJ
1755 /* If there is a pending SIGSTOP, get rid of it. */
1756 if (lp->signalled)
d6b0e80f 1757 {
d6b0e80f
AC
1758 if (debug_linux_nat)
1759 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1760 "DC: Sending SIGCONT to %s\n",
1761 target_pid_to_str (lp->ptid));
d6b0e80f 1762
a0ef4274 1763 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
d6b0e80f 1764 lp->signalled = 0;
d6b0e80f
AC
1765 }
1766
1767 /* We don't actually detach from the LWP that has an id equal to the
1768 overall process id just yet. */
1769 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1770 {
a0ef4274
DJ
1771 int status = 0;
1772
1773 /* Pass on any pending signal for this LWP. */
1774 get_pending_status (lp, &status);
1775
d6b0e80f
AC
1776 errno = 0;
1777 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
a0ef4274 1778 WSTOPSIG (status)) < 0)
8a3fe4f8 1779 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1780 safe_strerror (errno));
1781
1782 if (debug_linux_nat)
1783 fprintf_unfiltered (gdb_stdlog,
1784 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1785 target_pid_to_str (lp->ptid),
7feb7d06 1786 strsignal (WSTOPSIG (status)));
d6b0e80f
AC
1787
1788 delete_lwp (lp->ptid);
1789 }
1790
1791 return 0;
1792}
1793
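/* Illustrative sketch, not part of the original file: detaching from
   a stopped LWP while delivering a pending signal, as detach_callback
   does above.  example_detach_with_signal is a hypothetical helper;
   on failure it just returns -1 and leaves the reason in errno.  */
static int
example_detach_with_signal (pid_t lwpid, int sig)
{
  errno = 0;
  if (ptrace (PTRACE_DETACH, lwpid, 0, sig) < 0)
    return -1;		/* errno describes the failure.  */
  return 0;
}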
1794static void
136d6dae 1795linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f 1796{
b84876c2 1797 int pid;
a0ef4274
DJ
1798 int status;
1799 enum target_signal sig;
d90e17a7
PA
1800 struct lwp_info *main_lwp;
1801
1802 pid = GET_PID (inferior_ptid);
a0ef4274 1803
b84876c2
PA
1804 if (target_can_async_p ())
1805 linux_nat_async (NULL, 0);
1806
4c28f408
PA
1807 /* Stop all threads before detaching. ptrace requires that the
1808 thread is stopped to successfully detach. */
d90e17a7 1809 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
4c28f408
PA
1810 /* ... and wait until all of them have reported back that
1811 they're no longer running. */
d90e17a7 1812 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
4c28f408 1813
d90e17a7 1814 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
d6b0e80f
AC
1815
1816 /* Only the initial process should be left right now. */
d90e17a7
PA
1817 gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1818
1819 main_lwp = find_lwp_pid (pid_to_ptid (pid));
d6b0e80f 1820
a0ef4274
DJ
1821 /* Pass on any pending signal for the last LWP. */
1822 if ((args == NULL || *args == '\0')
d90e17a7 1823 && get_pending_status (main_lwp, &status) != -1
a0ef4274
DJ
1824 && WIFSTOPPED (status))
1825 {
1826 /* Put the signal number in ARGS so that inf_ptrace_detach will
1827 pass it along with PTRACE_DETACH. */
1828 args = alloca (8);
1829 sprintf (args, "%d", (int) WSTOPSIG (status));
1830 fprintf_unfiltered (gdb_stdlog,
1831 "LND: Sending signal %s to %s\n",
1832 args,
d90e17a7 1833 target_pid_to_str (main_lwp->ptid));
a0ef4274
DJ
1834 }
1835
d90e17a7 1836 delete_lwp (main_lwp->ptid);
b84876c2 1837
7a7d3353
PA
1838 if (forks_exist_p ())
1839 {
1840 /* Multi-fork case. The current inferior_ptid is being detached
1841 from, but there are other viable forks to debug. Detach from
1842 the current fork, and context-switch to the first
1843 available. */
1844 linux_fork_detach (args, from_tty);
1845
1846 if (non_stop && target_can_async_p ())
1847 target_async (inferior_event_handler, 0);
1848 }
1849 else
1850 linux_ops->to_detach (ops, args, from_tty);
d6b0e80f
AC
1851}
1852
1853/* Resume LP. */
1854
1855static int
1856resume_callback (struct lwp_info *lp, void *data)
1857{
6c95b8df
PA
1858 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1859
1860 if (lp->stopped && inf->vfork_child != NULL)
1861 {
1862 if (debug_linux_nat)
1863 fprintf_unfiltered (gdb_stdlog,
1864 "RC: Not resuming %s (vfork parent)\n",
1865 target_pid_to_str (lp->ptid));
1866 }
1867 else if (lp->stopped && lp->status == 0)
d6b0e80f 1868 {
d90e17a7
PA
1869 if (debug_linux_nat)
1870 fprintf_unfiltered (gdb_stdlog,
1871 "RC: PTRACE_CONT %s, 0, 0 (resuming sibling)\n",
1872 target_pid_to_str (lp->ptid));
1873
28439f5e
PA
1874 linux_ops->to_resume (linux_ops,
1875 pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 1876 0, TARGET_SIGNAL_0);
d6b0e80f
AC
1877 if (debug_linux_nat)
1878 fprintf_unfiltered (gdb_stdlog,
1879 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1880 target_pid_to_str (lp->ptid));
1881 lp->stopped = 0;
1882 lp->step = 0;
9f0bdab8 1883 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
d6b0e80f 1884 }
57380f4e
DJ
1885 else if (lp->stopped && debug_linux_nat)
1886 fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (has pending)\n",
1887 target_pid_to_str (lp->ptid));
1888 else if (debug_linux_nat)
1889 fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (not stopped)\n",
1890 target_pid_to_str (lp->ptid));
d6b0e80f
AC
1891
1892 return 0;
1893}
1894
1895static int
1896resume_clear_callback (struct lwp_info *lp, void *data)
1897{
1898 lp->resumed = 0;
1899 return 0;
1900}
1901
1902static int
1903resume_set_callback (struct lwp_info *lp, void *data)
1904{
1905 lp->resumed = 1;
1906 return 0;
1907}
1908
1909static void
28439f5e
PA
1910linux_nat_resume (struct target_ops *ops,
1911 ptid_t ptid, int step, enum target_signal signo)
d6b0e80f 1912{
7feb7d06 1913 sigset_t prev_mask;
d6b0e80f 1914 struct lwp_info *lp;
d90e17a7 1915 int resume_many;
d6b0e80f 1916
76f50ad1
DJ
1917 if (debug_linux_nat)
1918 fprintf_unfiltered (gdb_stdlog,
1919 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1920 step ? "step" : "resume",
1921 target_pid_to_str (ptid),
1922 signo ? strsignal (signo) : "0",
1923 target_pid_to_str (inferior_ptid));
1924
7feb7d06 1925 block_child_signals (&prev_mask);
b84876c2 1926
d6b0e80f 1927 /* A specific PTID means `step only this process id'. */
d90e17a7
PA
1928 resume_many = (ptid_equal (minus_one_ptid, ptid)
1929 || ptid_is_pid (ptid));
4c28f408
PA
1930
1931 if (!non_stop)
1932 {
d90e17a7
PA
1933 /* Mark the lwps we're resuming as resumed. */
1934 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
1935 iterate_over_lwps (ptid, resume_set_callback, NULL);
4c28f408 1936 }
d90e17a7
PA
1937 else
1938 iterate_over_lwps (minus_one_ptid, resume_set_callback, NULL);
d6b0e80f 1939
d90e17a7
PA
1940 /* See if it's the current inferior that should be handled
1941 specially. */
1942 if (resume_many)
1943 lp = find_lwp_pid (inferior_ptid);
1944 else
1945 lp = find_lwp_pid (ptid);
9f0bdab8 1946 gdb_assert (lp != NULL);
d6b0e80f 1947
9f0bdab8
DJ
1948 /* Remember if we're stepping. */
1949 lp->step = step;
d6b0e80f 1950
9f0bdab8
DJ
1951 /* If we have a pending wait status for this thread, there is no
1952 point in resuming the process. But first make sure that
1953 linux_nat_wait won't preemptively handle the event - we
1954 should never take this short-circuit if we are going to
1955 leave LP running, since we have skipped resuming all the
1956 other threads. This bit of code needs to be synchronized
1957 with linux_nat_wait. */
76f50ad1 1958
9f0bdab8
DJ
1959 if (lp->status && WIFSTOPPED (lp->status))
1960 {
d6b48e9c
PA
1961 int saved_signo;
1962 struct inferior *inf;
76f50ad1 1963
d90e17a7 1964 inf = find_inferior_pid (ptid_get_pid (lp->ptid));
d6b48e9c
PA
1965 gdb_assert (inf);
1966 saved_signo = target_signal_from_host (WSTOPSIG (lp->status));
1967
1968 /* Defer to common code if we're gaining control of the
1969 inferior. */
1970 if (inf->stop_soon == NO_STOP_QUIETLY
1971 && signal_stop_state (saved_signo) == 0
9f0bdab8
DJ
1972 && signal_print_state (saved_signo) == 0
1973 && signal_pass_state (saved_signo) == 1)
d6b0e80f 1974 {
9f0bdab8
DJ
1975 if (debug_linux_nat)
1976 fprintf_unfiltered (gdb_stdlog,
1977 "LLR: Not short circuiting for ignored "
1978 "status 0x%x\n", lp->status);
1979
d6b0e80f
AC
1980 /* FIXME: What should we do if we are supposed to continue
1981 this thread with a signal? */
1982 gdb_assert (signo == TARGET_SIGNAL_0);
9f0bdab8
DJ
1983 signo = saved_signo;
1984 lp->status = 0;
1985 }
1986 }
76f50ad1 1987
6c95b8df 1988 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
9f0bdab8
DJ
1989 {
1990 /* FIXME: What should we do if we are supposed to continue
1991 this thread with a signal? */
1992 gdb_assert (signo == TARGET_SIGNAL_0);
76f50ad1 1993
9f0bdab8
DJ
1994 if (debug_linux_nat)
1995 fprintf_unfiltered (gdb_stdlog,
1996 "LLR: Short circuiting for status 0x%x\n",
1997 lp->status);
d6b0e80f 1998
7feb7d06
PA
1999 restore_child_signals_mask (&prev_mask);
2000 if (target_can_async_p ())
2001 {
2002 target_async (inferior_event_handler, 0);
2003 /* Tell the event loop we have something to process. */
2004 async_file_mark ();
2005 }
9f0bdab8 2006 return;
d6b0e80f
AC
2007 }
2008
9f0bdab8
DJ
2009 /* Mark LWP as not stopped to prevent it from being continued by
2010 resume_callback. */
2011 lp->stopped = 0;
2012
d90e17a7
PA
2013 if (resume_many)
2014 iterate_over_lwps (ptid, resume_callback, NULL);
2015
2016 /* Convert to something the lower layer understands. */
2017 ptid = pid_to_ptid (GET_LWP (lp->ptid));
d6b0e80f 2018
28439f5e 2019 linux_ops->to_resume (linux_ops, ptid, step, signo);
9f0bdab8
DJ
2020 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2021
d6b0e80f
AC
2022 if (debug_linux_nat)
2023 fprintf_unfiltered (gdb_stdlog,
2024 "LLR: %s %s, %s (resume event thread)\n",
2025 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2026 target_pid_to_str (ptid),
2027 signo ? strsignal (signo) : "0");
b84876c2 2028
7feb7d06 2029 restore_child_signals_mask (&prev_mask);
b84876c2 2030 if (target_can_async_p ())
8ea051c5 2031 target_async (inferior_event_handler, 0);
d6b0e80f
AC
2032}
2033
2034/* Issue kill to specified lwp. */
2035
2036static int tkill_failed;
2037
2038static int
2039kill_lwp (int lwpid, int signo)
2040{
2041 errno = 0;
2042
2043/* Use tkill, if possible, in case we are using nptl threads. If tkill
2044 fails, then we are not using nptl threads and we should be using kill. */
2045
2046#ifdef HAVE_TKILL_SYSCALL
2047 if (!tkill_failed)
2048 {
2049 int ret = syscall (__NR_tkill, lwpid, signo);
2050 if (errno != ENOSYS)
2051 return ret;
2052 errno = 0;
2053 tkill_failed = 1;
2054 }
2055#endif
2056
2057 return kill (lwpid, signo);
2058}
2059
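/* Illustrative sketch, not part of the original file: probing for the
   tkill syscall and falling back to kill(2), which is what kill_lwp
   above caches the answer for.  tkill targets a single thread, while
   kill targets a whole process, so it is preferred under NPTL.
   example_thread_kill is a hypothetical name.  */
#ifdef HAVE_TKILL_SYSCALL
static int
example_thread_kill (int tid, int sig)
{
  int ret;

  errno = 0;
  ret = syscall (__NR_tkill, tid, sig);
  if (ret < 0 && errno == ENOSYS)
    ret = kill (tid, sig);	/* Pre-tkill kernel: fall back.  */
  return ret;
}
#endif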
ca2163eb
PA
2060/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2061 event, check if the core is interested in it: if not, ignore the
2062 event, and keep waiting; otherwise, we need to toggle the LWP's
2063 syscall entry/exit status, since the ptrace event itself doesn't
2064 indicate it, and report the trap to higher layers. */
2065
2066static int
2067linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
2068{
2069 struct target_waitstatus *ourstatus = &lp->waitstatus;
2070 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
2071 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
2072
2073 if (stopping)
2074 {
2075 /* If we're stopping threads, there's a SIGSTOP pending, which
2076 makes it so that the LWP reports an immediate syscall return,
2077 followed by the SIGSTOP. Skip seeing that "return" using
2078 PTRACE_CONT directly, and let stop_wait_callback collect the
2079 SIGSTOP. Later when the thread is resumed, a new syscall
2080 entry event is reported. If we didn't do this (and returned 0), we'd
2081 leave a syscall entry pending, and our caller, by using
2082 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2083 itself. Later, when the user re-resumes this LWP, we'd see
2084 another syscall entry event and we'd mistake it for a return.
2085
2086 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2087 (leaving immediately with LWP->signalled set, without issuing
2088 a PTRACE_CONT), it would still be problematic to leave this
2089 syscall enter pending, as later when the thread is resumed,
2090 it would then see the same syscall exit mentioned above,
2091 followed by the delayed SIGSTOP, while the syscall didn't
2092 actually get to execute. It seems it would be even more
2093 confusing to the user. */
2094
2095 if (debug_linux_nat)
2096 fprintf_unfiltered (gdb_stdlog,
2097 "LHST: ignoring syscall %d "
2098 "for LWP %ld (stopping threads), "
2099 "resuming with PTRACE_CONT for SIGSTOP\n",
2100 syscall_number,
2101 GET_LWP (lp->ptid));
2102
2103 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2104 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2105 return 1;
2106 }
2107
2108 if (catch_syscall_enabled ())
2109 {
2110 /* Always update the entry/return state, even if this particular
2111 syscall isn't interesting to the core now. In async mode,
2112 the user could install a new catchpoint for this syscall
2113 between syscall enter/return, and we'll need to know to
2114 report a syscall return if that happens. */
2115 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2116 ? TARGET_WAITKIND_SYSCALL_RETURN
2117 : TARGET_WAITKIND_SYSCALL_ENTRY);
2118
2119 if (catching_syscall_number (syscall_number))
2120 {
2121 /* Alright, an event to report. */
2122 ourstatus->kind = lp->syscall_state;
2123 ourstatus->value.syscall_number = syscall_number;
2124
2125 if (debug_linux_nat)
2126 fprintf_unfiltered (gdb_stdlog,
2127 "LHST: stopping for %s of syscall %d"
2128 " for LWP %ld\n",
2129 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2130 ? "entry" : "return",
2131 syscall_number,
2132 GET_LWP (lp->ptid));
2133 return 0;
2134 }
2135
2136 if (debug_linux_nat)
2137 fprintf_unfiltered (gdb_stdlog,
2138 "LHST: ignoring %s of syscall %d "
2139 "for LWP %ld\n",
2140 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2141 ? "entry" : "return",
2142 syscall_number,
2143 GET_LWP (lp->ptid));
2144 }
2145 else
2146 {
2147 /* If we had been syscall tracing, and hence used PT_SYSCALL
2148 before on this LWP, it could happen that the user removes all
2149 syscall catchpoints before we get to process this event.
2150 There are two noteworthy issues here:
2151
2152 - When stopped at a syscall entry event, resuming with
2153 PT_STEP still resumes executing the syscall and reports a
2154 syscall return.
2155
2156 - Only PT_SYSCALL catches syscall enters. If we last
2157 single-stepped this thread, then this event can't be a
2158 syscall enter; it has to be a syscall exit.
2160
2161 The points above mean that the next resume, be it PT_STEP or
2162 PT_CONTINUE, can not trigger a syscall trace event. */
2163 if (debug_linux_nat)
2164 fprintf_unfiltered (gdb_stdlog,
2165 "LHST: caught syscall event with no syscall catchpoints."
2166 " %d for LWP %ld, ignoring\n",
2167 syscall_number,
2168 GET_LWP (lp->ptid));
2169 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2170 }
2171
2172 /* The core isn't interested in this event. For efficiency, avoid
2173 stopping all threads only to have the core resume them all again.
2174 Since we're not stopping threads, if we're still syscall tracing
2175 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2176 subsequent syscall. Simply resume using the inf-ptrace layer,
2177 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2178
2179 /* Note that gdbarch_get_syscall_number may access registers, hence
2180 fill a regcache. */
2181 registers_changed ();
2182 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2183 lp->step, TARGET_SIGNAL_0);
2184 return 1;
2185}
2186
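/* Illustrative sketch, not part of the original file: the basic
   PTRACE_SYSCALL protocol that the handler above builds on.  ptrace
   reports the same kind of stop for syscall entry and for syscall
   exit, so the tracer must toggle a per-thread flag itself, just as
   lp->syscall_state is toggled above.  PID is assumed to be an
   already-traced, currently stopped LWP; without PTRACE_O_TRACESYSGOOD
   the stop signal is plain SIGTRAP.  */
static void
example_trace_one_syscall (pid_t pid)
{
  int status;
  int at_entry = 0;		/* Toggled on each syscall stop.  */

  for (;;)
    {
      if (ptrace (PTRACE_SYSCALL, pid, 0, 0) != 0)
	break;
      if (my_waitpid (pid, &status, 0) != pid
	  || !WIFSTOPPED (status) || WSTOPSIG (status) != SIGTRAP)
	break;

      at_entry = !at_entry;	/* First stop is the entry, next the exit.  */
      if (!at_entry)
	break;			/* Saw one full entry/exit pair.  */
    }
}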
3d799a95
DJ
2187/* Handle a GNU/Linux extended wait response. If we see a clone
2188 event, we need to add the new LWP to our list (and not report the
2189 trap to higher layers). This function returns non-zero if the
2190 event should be ignored and we should wait again. If STOPPING is
2191 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
2192
2193static int
3d799a95
DJ
2194linux_handle_extended_wait (struct lwp_info *lp, int status,
2195 int stopping)
d6b0e80f 2196{
3d799a95
DJ
2197 int pid = GET_LWP (lp->ptid);
2198 struct target_waitstatus *ourstatus = &lp->waitstatus;
2199 struct lwp_info *new_lp = NULL;
2200 int event = status >> 16;
d6b0e80f 2201
3d799a95
DJ
2202 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2203 || event == PTRACE_EVENT_CLONE)
d6b0e80f 2204 {
3d799a95
DJ
2205 unsigned long new_pid;
2206 int ret;
2207
2208 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 2209
3d799a95
DJ
2210 /* If we haven't already seen the new PID stop, wait for it now. */
2211 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2212 {
2213 /* The new child has a pending SIGSTOP. We can't affect it until it
2214 hits the SIGSTOP, but we're already attached. */
2215 ret = my_waitpid (new_pid, &status,
2216 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2217 if (ret == -1)
2218 perror_with_name (_("waiting for new child"));
2219 else if (ret != new_pid)
2220 internal_error (__FILE__, __LINE__,
2221 _("wait returned unexpected PID %d"), ret);
2222 else if (!WIFSTOPPED (status))
2223 internal_error (__FILE__, __LINE__,
2224 _("wait returned unexpected status 0x%x"), status);
2225 }
2226
3a3e9ee3 2227 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95 2228
2277426b
PA
2229 if (event == PTRACE_EVENT_FORK
2230 && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2231 {
2232 struct fork_info *fp;
2233
2234 /* Handle checkpointing by linux-fork.c here as a special
2235 case. We don't want the follow-fork-mode or 'catch fork'
2236 to interfere with this. */
2237
2238 /* This won't actually modify the breakpoint list, but will
2239 physically remove the breakpoints from the child. */
2240 detach_breakpoints (new_pid);
2241
2242 /* Retain child fork in ptrace (stopped) state. */
2243 fp = find_fork_pid (new_pid);
2244 if (!fp)
2245 fp = add_fork (new_pid);
2246
2247 /* Report as spurious, so that infrun doesn't want to follow
2248 this fork. We're actually doing an infcall in
2249 linux-fork.c. */
2250 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2251 linux_enable_event_reporting (pid_to_ptid (new_pid));
2252
2253 /* Report the stop to the core. */
2254 return 0;
2255 }
2256
3d799a95
DJ
2257 if (event == PTRACE_EVENT_FORK)
2258 ourstatus->kind = TARGET_WAITKIND_FORKED;
2259 else if (event == PTRACE_EVENT_VFORK)
2260 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 2261 else
3d799a95 2262 {
4c28f408
PA
2263 struct cleanup *old_chain;
2264
3d799a95 2265 ourstatus->kind = TARGET_WAITKIND_IGNORE;
d90e17a7 2266 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
3d799a95 2267 new_lp->cloned = 1;
4c28f408 2268 new_lp->stopped = 1;
d6b0e80f 2269
3d799a95
DJ
2270 if (WSTOPSIG (status) != SIGSTOP)
2271 {
2272 /* This can happen if someone starts sending signals
2273 with a lower number than SIGSTOP (e.g. SIGUSR1) to the
2274 new thread before it gets a chance to run.
2275 This is an unlikely case, and harder to handle for
2276 fork / vfork than for clone, so we do not try - but
2277 we handle it for clone events here. We'll send
2278 the other signal on to the thread below. */
2279
2280 new_lp->signalled = 1;
2281 }
2282 else
2283 status = 0;
d6b0e80f 2284
4c28f408 2285 if (non_stop)
3d799a95 2286 {
4c28f408
PA
2287 /* Add the new thread to GDB's lists as soon as possible
2288 so that:
2289
2290 1) the frontend doesn't have to wait for a stop to
2291 display them, and,
2292
2293 2) we tag it with the correct running state. */
2294
2295 /* If the thread_db layer is active, let it know about
2296 this new thread, and add it to GDB's list. */
2297 if (!thread_db_attach_lwp (new_lp->ptid))
2298 {
2299 /* We're not using thread_db. Add it to GDB's
2300 list. */
2301 target_post_attach (GET_LWP (new_lp->ptid));
2302 add_thread (new_lp->ptid);
2303 }
2304
2305 if (!stopping)
2306 {
2307 set_running (new_lp->ptid, 1);
2308 set_executing (new_lp->ptid, 1);
2309 }
2310 }
2311
ca2163eb
PA
2312 /* Note the need to use the low target ops to resume, to
2313 handle resuming with PT_SYSCALL if we have syscall
2314 catchpoints. */
4c28f408
PA
2315 if (!stopping)
2316 {
ca2163eb
PA
2317 int signo;
2318
4c28f408 2319 new_lp->stopped = 0;
3d799a95 2320 new_lp->resumed = 1;
ca2163eb
PA
2321
2322 signo = (status
2323 ? target_signal_from_host (WSTOPSIG (status))
2324 : TARGET_SIGNAL_0);
2325
2326 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2327 0, signo);
3d799a95 2328 }
d6b0e80f 2329
3d799a95
DJ
2330 if (debug_linux_nat)
2331 fprintf_unfiltered (gdb_stdlog,
2332 "LHEW: Got clone event from LWP %ld, resuming\n",
2333 GET_LWP (lp->ptid));
ca2163eb
PA
2334 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2335 0, TARGET_SIGNAL_0);
3d799a95
DJ
2336
2337 return 1;
2338 }
2339
2340 return 0;
d6b0e80f
AC
2341 }
2342
3d799a95
DJ
2343 if (event == PTRACE_EVENT_EXEC)
2344 {
a75724bc
PA
2345 if (debug_linux_nat)
2346 fprintf_unfiltered (gdb_stdlog,
2347 "LHEW: Got exec event from LWP %ld\n",
2348 GET_LWP (lp->ptid));
2349
3d799a95
DJ
2350 ourstatus->kind = TARGET_WAITKIND_EXECD;
2351 ourstatus->value.execd_pathname
6d8fd2b7 2352 = xstrdup (linux_child_pid_to_exec_file (pid));
3d799a95 2353
6c95b8df
PA
2354 return 0;
2355 }
2356
2357 if (event == PTRACE_EVENT_VFORK_DONE)
2358 {
2359 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 2360 {
6c95b8df
PA
2361 if (debug_linux_nat)
2362 fprintf_unfiltered (gdb_stdlog, "\
2363LHEW: Got expected PTRACE_EVENT_VFORK_DONE from LWP %ld: stopping\n",
2364 GET_LWP (lp->ptid));
3d799a95 2365
6c95b8df
PA
2366 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2367 return 0;
3d799a95
DJ
2368 }
2369
6c95b8df
PA
2370 if (debug_linux_nat)
2371 fprintf_unfiltered (gdb_stdlog, "\
2372LHEW: Got PTRACE_EVENT_VFORK_DONE from LWP %ld: resuming\n",
2373 GET_LWP (lp->ptid));
2374 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2375 return 1;
3d799a95
DJ
2376 }
2377
2378 internal_error (__FILE__, __LINE__,
2379 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2380}
2381
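/* Illustrative sketch, not part of the original file: how a clone
   event is decoded at the raw ptrace level, which is what the handler
   above wraps.  The extended event code lives in bits 16..23 of the
   wait status, and PTRACE_GETEVENTMSG yields the new thread's id.
   example_decode_clone_event is a hypothetical name.  */
static int
example_decode_clone_event (pid_t pid, int status, unsigned long *new_tid)
{
  if (!WIFSTOPPED (status)
      || WSTOPSIG (status) != SIGTRAP
      || (status >> 16) != PTRACE_EVENT_CLONE)
    return -1;			/* Not a clone event.  */

  if (ptrace (PTRACE_GETEVENTMSG, pid, 0, new_tid) < 0)
    return -1;

  return 0;
}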
2382/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2383 exited. */
2384
2385static int
2386wait_lwp (struct lwp_info *lp)
2387{
2388 pid_t pid;
2389 int status;
2390 int thread_dead = 0;
2391
2392 gdb_assert (!lp->stopped);
2393 gdb_assert (lp->status == 0);
2394
58aecb61 2395 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
d6b0e80f
AC
2396 if (pid == -1 && errno == ECHILD)
2397 {
58aecb61 2398 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
d6b0e80f
AC
2399 if (pid == -1 && errno == ECHILD)
2400 {
2401 /* The thread has previously exited. We need to delete it
2402 now because, for some vendor 2.4 kernels with NPTL
2403 support backported, there won't be an exit event unless
2404 it is the main thread. 2.6 kernels will report an exit
2405 event for each thread that exits, as expected. */
2406 thread_dead = 1;
2407 if (debug_linux_nat)
2408 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2409 target_pid_to_str (lp->ptid));
2410 }
2411 }
2412
2413 if (!thread_dead)
2414 {
2415 gdb_assert (pid == GET_LWP (lp->ptid));
2416
2417 if (debug_linux_nat)
2418 {
2419 fprintf_unfiltered (gdb_stdlog,
2420 "WL: waitpid %s received %s\n",
2421 target_pid_to_str (lp->ptid),
2422 status_to_str (status));
2423 }
2424 }
2425
2426 /* Check if the thread has exited. */
2427 if (WIFEXITED (status) || WIFSIGNALED (status))
2428 {
2429 thread_dead = 1;
2430 if (debug_linux_nat)
2431 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2432 target_pid_to_str (lp->ptid));
2433 }
2434
2435 if (thread_dead)
2436 {
e26af52f 2437 exit_lwp (lp);
d6b0e80f
AC
2438 return 0;
2439 }
2440
2441 gdb_assert (WIFSTOPPED (status));
2442
ca2163eb
PA
2443 /* Handle GNU/Linux's syscall SIGTRAPs. */
2444 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2445 {
2446 /* No longer need the sysgood bit. The ptrace event ends up
2447 recorded in lp->waitstatus if we care for it. We can carry
2448 on handling the event like a regular SIGTRAP from here
2449 on. */
2450 status = W_STOPCODE (SIGTRAP);
2451 if (linux_handle_syscall_trap (lp, 1))
2452 return wait_lwp (lp);
2453 }
2454
d6b0e80f
AC
2455 /* Handle GNU/Linux's extended waitstatus for trace events. */
2456 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2457 {
2458 if (debug_linux_nat)
2459 fprintf_unfiltered (gdb_stdlog,
2460 "WL: Handling extended status 0x%06x\n",
2461 status);
3d799a95 2462 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
2463 return wait_lwp (lp);
2464 }
2465
2466 return status;
2467}
2468
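/* Illustrative sketch, not part of the original file: the two-step
   waitpid used by wait_lwp above.  A cloned LWP can only be reaped
   with the __WCLONE flag, so when the plain call reports ECHILD we
   retry with __WCLONE.  example_wait_any_flavour is a hypothetical
   name.  */
static pid_t
example_wait_any_flavour (pid_t lwpid, int *status)
{
  pid_t ret = my_waitpid (lwpid, status, 0);

  if (ret == -1 && errno == ECHILD)
    ret = my_waitpid (lwpid, status, __WCLONE);

  return ret;
}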
9f0bdab8
DJ
2469/* Save the most recent siginfo for LP. This is currently only called
2470 for SIGTRAP; some ports use the si_addr field for
2471 target_stopped_data_address. In the future, it may also be used to
2472 restore the siginfo of requeued signals. */
2473
2474static void
2475save_siginfo (struct lwp_info *lp)
2476{
2477 errno = 0;
2478 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2479 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2480
2481 if (errno != 0)
2482 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2483}
2484
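/* Illustrative sketch, not part of the original file: reading the
   fault address out of the siginfo captured with PTRACE_GETSIGINFO,
   the same data save_siginfo stores above for later use by
   target_stopped_data_address.  example_trap_address is a
   hypothetical helper and uses the standard siginfo_t typedef.  */
static CORE_ADDR
example_trap_address (pid_t lwpid)
{
  siginfo_t si;

  errno = 0;
  ptrace (PTRACE_GETSIGINFO, lwpid, (PTRACE_TYPE_ARG3) 0, &si);
  if (errno != 0)
    return 0;			/* No siginfo available.  */

  return (CORE_ADDR) (unsigned long) si.si_addr;
}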
d6b0e80f
AC
2485/* Send a SIGSTOP to LP. */
2486
2487static int
2488stop_callback (struct lwp_info *lp, void *data)
2489{
2490 if (!lp->stopped && !lp->signalled)
2491 {
2492 int ret;
2493
2494 if (debug_linux_nat)
2495 {
2496 fprintf_unfiltered (gdb_stdlog,
2497 "SC: kill %s **<SIGSTOP>**\n",
2498 target_pid_to_str (lp->ptid));
2499 }
2500 errno = 0;
2501 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2502 if (debug_linux_nat)
2503 {
2504 fprintf_unfiltered (gdb_stdlog,
2505 "SC: lwp kill %d %s\n",
2506 ret,
2507 errno ? safe_strerror (errno) : "ERRNO-OK");
2508 }
2509
2510 lp->signalled = 1;
2511 gdb_assert (lp->status == 0);
2512 }
2513
2514 return 0;
2515}
2516
57380f4e 2517/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2518
2519static int
57380f4e
DJ
2520linux_nat_has_pending_sigint (int pid)
2521{
2522 sigset_t pending, blocked, ignored;
2523 int i;
2524
2525 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2526
2527 if (sigismember (&pending, SIGINT)
2528 && !sigismember (&ignored, SIGINT))
2529 return 1;
2530
2531 return 0;
2532}
2533
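/* Illustrative sketch, not part of the original file: where the
   pending-signal information ultimately comes from.
   linux_proc_pending_signals parses /proc/PID/status, whose SigPnd
   and ShdPnd lines carry hexadecimal masks of per-thread and shared
   pending signals; bit N-1 set means signal N is pending.  The helper
   name is hypothetical and assumes strtoull is available.  */
static int
example_signal_pending_in_mask (const char *hex_mask, int sig)
{
  unsigned long long mask = strtoull (hex_mask, NULL, 16);

  return (mask & (1ULL << (sig - 1))) != 0;
}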
2534/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2535
2536static int
2537set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2538{
57380f4e
DJ
2539 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2540 flag to consume the next one. */
2541 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2542 && WSTOPSIG (lp->status) == SIGINT)
2543 lp->status = 0;
2544 else
2545 lp->ignore_sigint = 1;
2546
2547 return 0;
2548}
2549
2550/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2551 This function is called after we know the LWP has stopped; if the LWP
2552 stopped before the expected SIGINT was delivered, then it will never have
2553 arrived. Also, if the signal was delivered to a shared queue and consumed
2554 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2555
57380f4e
DJ
2556static void
2557maybe_clear_ignore_sigint (struct lwp_info *lp)
2558{
2559 if (!lp->ignore_sigint)
2560 return;
2561
2562 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2563 {
2564 if (debug_linux_nat)
2565 fprintf_unfiltered (gdb_stdlog,
2566 "MCIS: Clearing bogus flag for %s\n",
2567 target_pid_to_str (lp->ptid));
2568 lp->ignore_sigint = 0;
2569 }
2570}
2571
2572/* Wait until LP is stopped. */
2573
2574static int
2575stop_wait_callback (struct lwp_info *lp, void *data)
2576{
6c95b8df
PA
2577 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2578
2579 /* If this is a vfork parent, bail out, it is not going to report
2580 any SIGSTOP until the vfork is done with. */
2581 if (inf->vfork_child != NULL)
2582 return 0;
2583
d6b0e80f
AC
2584 if (!lp->stopped)
2585 {
2586 int status;
2587
2588 status = wait_lwp (lp);
2589 if (status == 0)
2590 return 0;
2591
57380f4e
DJ
2592 if (lp->ignore_sigint && WIFSTOPPED (status)
2593 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2594 {
57380f4e 2595 lp->ignore_sigint = 0;
d6b0e80f
AC
2596
2597 errno = 0;
2598 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2599 if (debug_linux_nat)
2600 fprintf_unfiltered (gdb_stdlog,
57380f4e 2601 "PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)\n",
d6b0e80f
AC
2602 target_pid_to_str (lp->ptid),
2603 errno ? safe_strerror (errno) : "OK");
2604
57380f4e 2605 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2606 }
2607
57380f4e
DJ
2608 maybe_clear_ignore_sigint (lp);
2609
d6b0e80f
AC
2610 if (WSTOPSIG (status) != SIGSTOP)
2611 {
2612 if (WSTOPSIG (status) == SIGTRAP)
2613 {
2614 /* If a LWP other than the LWP that we're reporting an
2615 event for has hit a GDB breakpoint (as opposed to
2616 some random trap signal), then just arrange for it to
2617 hit it again later. We don't keep the SIGTRAP status
2618 and don't forward the SIGTRAP signal to the LWP. We
2619 will handle the current event, eventually we will
2620 resume all LWPs, and this one will get its breakpoint
2621 trap again.
2622
2623 If we do not do this, then we run the risk that the
2624 user will delete or disable the breakpoint, but the
2625 thread will have already tripped on it. */
2626
9f0bdab8
DJ
2627 /* Save the trap's siginfo in case we need it later. */
2628 save_siginfo (lp);
2629
d6b0e80f
AC
2630 /* Now resume this LWP and get the SIGSTOP event. */
2631 errno = 0;
2632 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2633 if (debug_linux_nat)
2634 {
2635 fprintf_unfiltered (gdb_stdlog,
2636 "PTRACE_CONT %s, 0, 0 (%s)\n",
2637 target_pid_to_str (lp->ptid),
2638 errno ? safe_strerror (errno) : "OK");
2639
2640 fprintf_unfiltered (gdb_stdlog,
2641 "SWC: Candidate SIGTRAP event in %s\n",
2642 target_pid_to_str (lp->ptid));
2643 }
710151dd
PA
2644 /* Hold this event/waitstatus while we check to see if
2645 there are any more (we still want to get that SIGSTOP). */
57380f4e 2646 stop_wait_callback (lp, NULL);
710151dd 2647
7feb7d06
PA
2648 /* Hold the SIGTRAP for handling by linux_nat_wait. If
2649 there's another event, throw it back into the
2650 queue. */
2651 if (lp->status)
710151dd 2652 {
7feb7d06
PA
2653 if (debug_linux_nat)
2654 fprintf_unfiltered (gdb_stdlog,
2655 "SWC: kill %s, %s\n",
2656 target_pid_to_str (lp->ptid),
2657 status_to_str ((int) status));
2658 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
d6b0e80f 2659 }
7feb7d06
PA
2660
2661 /* Save the sigtrap event. */
2662 lp->status = status;
d6b0e80f
AC
2663 return 0;
2664 }
2665 else
2666 {
2667 /* The thread was stopped with a signal other than
2668 SIGSTOP, and didn't accidentally trip a breakpoint. */
2669
2670 if (debug_linux_nat)
2671 {
2672 fprintf_unfiltered (gdb_stdlog,
2673 "SWC: Pending event %s in %s\n",
2674 status_to_str ((int) status),
2675 target_pid_to_str (lp->ptid));
2676 }
2677 /* Now resume this LWP and get the SIGSTOP event. */
2678 errno = 0;
2679 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2680 if (debug_linux_nat)
2681 fprintf_unfiltered (gdb_stdlog,
2682 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2683 target_pid_to_str (lp->ptid),
2684 errno ? safe_strerror (errno) : "OK");
2685
2686 /* Hold this event/waitstatus while we check to see if
2687 there are any more (we still want to get that SIGSTOP). */
57380f4e 2688 stop_wait_callback (lp, NULL);
710151dd
PA
2689
2690 /* If the lp->status field is still empty, use it to
2691 hold this event. If not, then this event must be
2692 returned to the event queue of the LWP. */
7feb7d06 2693 if (lp->status)
d6b0e80f
AC
2694 {
2695 if (debug_linux_nat)
2696 {
2697 fprintf_unfiltered (gdb_stdlog,
2698 "SWC: kill %s, %s\n",
2699 target_pid_to_str (lp->ptid),
2700 status_to_str ((int) status));
2701 }
2702 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2703 }
710151dd
PA
2704 else
2705 lp->status = status;
d6b0e80f
AC
2706 return 0;
2707 }
2708 }
2709 else
2710 {
2711 /* We caught the SIGSTOP that we intended to catch, so
2712 there's no SIGSTOP pending. */
2713 lp->stopped = 1;
2714 lp->signalled = 0;
2715 }
2716 }
2717
2718 return 0;
2719}
2720
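/* Illustrative sketch, not part of the original file: the core
   stop-and-collect pattern behind stop_callback and stop_wait_callback
   above, without the breakpoint and SIGINT special cases.  A signal
   that arrives before our SIGSTOP is set aside in *PENDING so the
   caller can re-deliver it when the LWP is next resumed; the real
   code holds it in lp->status instead.  example_stop_and_collect is a
   hypothetical name.  */
static void
example_stop_and_collect (pid_t lwpid, int cloned, int *pending)
{
  int status;

  *pending = 0;
  kill_lwp (lwpid, SIGSTOP);

  while (my_waitpid (lwpid, &status, cloned ? __WCLONE : 0) == lwpid
	 && WIFSTOPPED (status)
	 && WSTOPSIG (status) != SIGSTOP)
    {
      /* Some other signal beat our SIGSTOP; remember the first one
	 and resume quietly so the pending SIGSTOP can be reported.  */
      if (*pending == 0)
	*pending = WSTOPSIG (status);
      ptrace (PTRACE_CONT, lwpid, 0, 0);
    }
}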
d6b0e80f
AC
2721/* Return non-zero if LP has a wait status pending. */
2722
2723static int
2724status_callback (struct lwp_info *lp, void *data)
2725{
2726 /* Only report a pending wait status if we pretend that this has
2727 indeed been resumed. */
ca2163eb
PA
2728 if (!lp->resumed)
2729 return 0;
2730
2731 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2732 {
2733 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
2734 or a pending process exit. Note that `W_EXITCODE(0,0) ==
2735 0', so a clean process exit can not be stored pending in
2736 lp->status, it is indistinguishable from
2737 no-pending-status. */
2738 return 1;
2739 }
2740
2741 if (lp->status != 0)
2742 return 1;
2743
2744 return 0;
d6b0e80f
AC
2745}
2746
2747/* Return non-zero if LP isn't stopped. */
2748
2749static int
2750running_callback (struct lwp_info *lp, void *data)
2751{
2752 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
2753}
2754
2755/* Count the LWP's that have had events. */
2756
2757static int
2758count_events_callback (struct lwp_info *lp, void *data)
2759{
2760 int *count = data;
2761
2762 gdb_assert (count != NULL);
2763
e09490f1
DJ
2764 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2765 if (lp->status != 0 && lp->resumed
d6b0e80f
AC
2766 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2767 (*count)++;
2768
2769 return 0;
2770}
2771
2772/* Select the LWP (if any) that is currently being single-stepped. */
2773
2774static int
2775select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2776{
2777 if (lp->step && lp->status != 0)
2778 return 1;
2779 else
2780 return 0;
2781}
2782
2783/* Select the Nth LWP that has had a SIGTRAP event. */
2784
2785static int
2786select_event_lwp_callback (struct lwp_info *lp, void *data)
2787{
2788 int *selector = data;
2789
2790 gdb_assert (selector != NULL);
2791
e09490f1
DJ
2792 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2793 if (lp->status != 0 && lp->resumed
d6b0e80f
AC
2794 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2795 if ((*selector)-- == 0)
2796 return 1;
2797
2798 return 0;
2799}
2800
710151dd
PA
2801static int
2802cancel_breakpoint (struct lwp_info *lp)
2803{
2804 /* Arrange for a breakpoint to be hit again later. We don't keep
2805 the SIGTRAP status and don't forward the SIGTRAP signal to the
2806 LWP. We will handle the current event, eventually we will resume
2807 this LWP, and this breakpoint will trap again.
2808
2809 If we do not do this, then we run the risk that the user will
2810 delete or disable the breakpoint, but the LWP will have already
2811 tripped on it. */
2812
515630c5
UW
2813 struct regcache *regcache = get_thread_regcache (lp->ptid);
2814 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2815 CORE_ADDR pc;
2816
2817 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
6c95b8df 2818 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
710151dd
PA
2819 {
2820 if (debug_linux_nat)
2821 fprintf_unfiltered (gdb_stdlog,
2822 "CB: Push back breakpoint for %s\n",
2823 target_pid_to_str (lp->ptid));
2824
2825 /* Back up the PC if necessary. */
515630c5
UW
2826 if (gdbarch_decr_pc_after_break (gdbarch))
2827 regcache_write_pc (regcache, pc);
2828
710151dd
PA
2829 return 1;
2830 }
2831 return 0;
2832}
2833
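/* Illustrative sketch, not part of the original file: the PC
   adjustment that cancel_breakpoint performs above, as a worked
   example.  On x86, a software breakpoint is the one-byte 0xCC trap
   and the reported PC is the address just past it, so
   gdbarch_decr_pc_after_break is 1: a trap planted at 0x400123
   reports PC == 0x400124, and rewinding by 1 points back at the
   breakpoint so the LWP can simply hit it again later.  */
static CORE_ADDR
example_breakpoint_pc (CORE_ADDR reported_pc, int decr_pc_after_break)
{
  return reported_pc - decr_pc_after_break;
}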
d6b0e80f
AC
2834static int
2835cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2836{
2837 struct lwp_info *event_lp = data;
2838
2839 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2840 if (lp == event_lp)
2841 return 0;
2842
2843 /* If a LWP other than the LWP that we're reporting an event for has
2844 hit a GDB breakpoint (as opposed to some random trap signal),
2845 then just arrange for it to hit it again later. We don't keep
2846 the SIGTRAP status and don't forward the SIGTRAP signal to the
2847 LWP. We will handle the current event, eventually we will resume
2848 all LWPs, and this one will get its breakpoint trap again.
2849
2850 If we do not do this, then we run the risk that the user will
2851 delete or disable the breakpoint, but the LWP will have already
2852 tripped on it. */
2853
ca2163eb
PA
2854 if (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2855 && lp->status != 0
d6b0e80f 2856 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
710151dd
PA
2857 && cancel_breakpoint (lp))
2858 /* Throw away the SIGTRAP. */
2859 lp->status = 0;
d6b0e80f
AC
2860
2861 return 0;
2862}
2863
2864/* Select one LWP out of those that have events pending. */
2865
2866static void
d90e17a7 2867select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2868{
2869 int num_events = 0;
2870 int random_selector;
2871 struct lwp_info *event_lp;
2872
ac264b3b 2873 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2874 (*orig_lp)->status = *status;
2875
2876 /* Give preference to any LWP that is being single-stepped. */
d90e17a7
PA
2877 event_lp = iterate_over_lwps (filter,
2878 select_singlestep_lwp_callback, NULL);
d6b0e80f
AC
2879 if (event_lp != NULL)
2880 {
2881 if (debug_linux_nat)
2882 fprintf_unfiltered (gdb_stdlog,
2883 "SEL: Select single-step %s\n",
2884 target_pid_to_str (event_lp->ptid));
2885 }
2886 else
2887 {
2888 /* No single-stepping LWP. Select one at random, out of those
2889 which have had SIGTRAP events. */
2890
2891 /* First see how many SIGTRAP events we have. */
d90e17a7 2892 iterate_over_lwps (filter, count_events_callback, &num_events);
d6b0e80f
AC
2893
2894 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2895 random_selector = (int)
2896 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2897
2898 if (debug_linux_nat && num_events > 1)
2899 fprintf_unfiltered (gdb_stdlog,
2900 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2901 num_events, random_selector);
2902
d90e17a7
PA
2903 event_lp = iterate_over_lwps (filter,
2904 select_event_lwp_callback,
d6b0e80f
AC
2905 &random_selector);
2906 }
2907
2908 if (event_lp != NULL)
2909 {
2910 /* Switch the event LWP. */
2911 *orig_lp = event_lp;
2912 *status = event_lp->status;
2913 }
2914
2915 /* Flush the wait status for the event LWP. */
2916 (*orig_lp)->status = 0;
2917}
2918
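/* Illustrative sketch, not part of the original file: the random
   selection used by select_event_lwp above.  Scaling rand () by
   NUM_EVENTS / (RAND_MAX + 1.0) yields an index uniformly distributed
   over 0 .. NUM_EVENTS - 1, e.g. with three pending SIGTRAPs each LWP
   is picked with probability 1/3, which avoids starving any single
   thread of event reporting.  */
static int
example_pick_event_index (int num_events)
{
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}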
2919/* Return non-zero if LP has been resumed. */
2920
2921static int
2922resumed_callback (struct lwp_info *lp, void *data)
2923{
2924 return lp->resumed;
2925}
2926
d6b0e80f
AC
2927/* Stop an active thread, verify it still exists, then resume it. */
2928
2929static int
2930stop_and_resume_callback (struct lwp_info *lp, void *data)
2931{
2932 struct lwp_info *ptr;
2933
2934 if (!lp->stopped && !lp->signalled)
2935 {
2936 stop_callback (lp, NULL);
2937 stop_wait_callback (lp, NULL);
2938 /* Resume if the lwp still exists. */
2939 for (ptr = lwp_list; ptr; ptr = ptr->next)
2940 if (lp == ptr)
2941 {
2942 resume_callback (lp, NULL);
2943 resume_set_callback (lp, NULL);
2944 }
2945 }
2946 return 0;
2947}
2948
02f3fc28 2949/* Check if we should go on and pass this event to common code.
fa2c6a57 2950 Return the affected lwp if we are, or NULL otherwise. */
02f3fc28
PA
2951static struct lwp_info *
2952linux_nat_filter_event (int lwpid, int status, int options)
2953{
2954 struct lwp_info *lp;
2955
2956 lp = find_lwp_pid (pid_to_ptid (lwpid));
2957
2958 /* Check for stop events reported by a process we didn't already
2959 know about - anything not already in our LWP list.
2960
2961 If we're expecting to receive stopped processes after
2962 fork, vfork, and clone events, then we'll just add the
2963 new one to our list and go back to waiting for the event
2964 to be reported - the stopped process might be returned
2965 from waitpid before or after the event is. */
2966 if (WIFSTOPPED (status) && !lp)
2967 {
2968 linux_record_stopped_pid (lwpid, status);
2969 return NULL;
2970 }
2971
2972 /* Make sure we don't report an event for the exit of an LWP not in
2973 our list, i.e. not part of the current process. This can happen
2974 if we detach from a program we originally forked and then it
2975 exits. */
2976 if (!WIFSTOPPED (status) && !lp)
2977 return NULL;
2978
2979 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2980 CLONE_PTRACE processes which do not use the thread library -
2981 otherwise we wouldn't find the new LWP this way. That doesn't
2982 currently work, and the following code is currently unreachable
2983 due to the two blocks above. If it's fixed some day, this code
2984 should be broken out into a function so that we can also pick up
2985 LWPs from the new interface. */
2986 if (!lp)
2987 {
2988 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
2989 if (options & __WCLONE)
2990 lp->cloned = 1;
2991
2992 gdb_assert (WIFSTOPPED (status)
2993 && WSTOPSIG (status) == SIGSTOP);
2994 lp->signalled = 1;
2995
2996 if (!in_thread_list (inferior_ptid))
2997 {
2998 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2999 GET_PID (inferior_ptid));
3000 add_thread (inferior_ptid);
3001 }
3002
3003 add_thread (lp->ptid);
3004 }
3005
ca2163eb
PA
3006 /* Handle GNU/Linux's syscall SIGTRAPs. */
3007 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3008 {
3009 /* No longer need the sysgood bit. The ptrace event ends up
3010 recorded in lp->waitstatus if we care for it. We can carry
3011 on handling the event like a regular SIGTRAP from here
3012 on. */
3013 status = W_STOPCODE (SIGTRAP);
3014 if (linux_handle_syscall_trap (lp, 0))
3015 return NULL;
3016 }
02f3fc28 3017
ca2163eb
PA
3018 /* Handle GNU/Linux's extended waitstatus for trace events. */
3019 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
02f3fc28
PA
3020 {
3021 if (debug_linux_nat)
3022 fprintf_unfiltered (gdb_stdlog,
3023 "LLW: Handling extended status 0x%06x\n",
3024 status);
3025 if (linux_handle_extended_wait (lp, status, 0))
3026 return NULL;
3027 }
3028
ca2163eb
PA
3029 /* Save the trap's siginfo in case we need it later. */
3030 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
3031 save_siginfo (lp);
3032
02f3fc28 3033 /* Check if the thread has exited. */
d90e17a7
PA
3034 if ((WIFEXITED (status) || WIFSIGNALED (status))
3035 && num_lwps (GET_PID (lp->ptid)) > 1)
02f3fc28 3036 {
9db03742
JB
3037 /* If this is the main thread, we must stop all threads and verify
3038 if they are still alive. This is because in the nptl thread model
3039 on Linux 2.4, there is no signal issued for exiting LWPs
02f3fc28
PA
3040 other than the main thread. We only get the main thread exit
3041 signal once all child threads have already exited. If we
3042 stop all the threads and use the stop_wait_callback to check
3043 if they have exited we can determine whether this signal
3044 should be ignored or whether it means the end of the debugged
3045 application, regardless of which threading model is being
5d3b6af6 3046 used. */
02f3fc28
PA
3047 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3048 {
3049 lp->stopped = 1;
d90e17a7
PA
3050 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
3051 stop_and_resume_callback, NULL);
02f3fc28
PA
3052 }
3053
3054 if (debug_linux_nat)
3055 fprintf_unfiltered (gdb_stdlog,
3056 "LLW: %s exited.\n",
3057 target_pid_to_str (lp->ptid));
3058
d90e17a7 3059 if (num_lwps (GET_PID (lp->ptid)) > 1)
9db03742
JB
3060 {
3061 /* If there is at least one more LWP, then the exit signal
3062 was not the end of the debugged application and should be
3063 ignored. */
3064 exit_lwp (lp);
3065 return NULL;
3066 }
02f3fc28
PA
3067 }
3068
3069 /* Check if the current LWP has previously exited. In the nptl
3070 thread model, LWPs other than the main thread do not issue
3071 signals when they exit so we must check whenever the thread has
3072 stopped. A similar check is made in stop_wait_callback(). */
d90e17a7 3073 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
02f3fc28 3074 {
d90e17a7
PA
3075 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3076
02f3fc28
PA
3077 if (debug_linux_nat)
3078 fprintf_unfiltered (gdb_stdlog,
3079 "LLW: %s exited.\n",
3080 target_pid_to_str (lp->ptid));
3081
3082 exit_lwp (lp);
3083
3084 /* Make sure there is at least one thread running. */
d90e17a7 3085 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
02f3fc28
PA
3086
3087 /* Discard the event. */
3088 return NULL;
3089 }
3090
3091 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3092 an attempt to stop an LWP. */
3093 if (lp->signalled
3094 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3095 {
3096 if (debug_linux_nat)
3097 fprintf_unfiltered (gdb_stdlog,
3098 "LLW: Delayed SIGSTOP caught for %s.\n",
3099 target_pid_to_str (lp->ptid));
3100
3101 /* This is a delayed SIGSTOP. */
3102 lp->signalled = 0;
3103
3104 registers_changed ();
3105
28439f5e 3106 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
02f3fc28
PA
3107 lp->step, TARGET_SIGNAL_0);
3108 if (debug_linux_nat)
3109 fprintf_unfiltered (gdb_stdlog,
3110 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3111 lp->step ?
3112 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3113 target_pid_to_str (lp->ptid));
3114
3115 lp->stopped = 0;
3116 gdb_assert (lp->resumed);
3117
3118 /* Discard the event. */
3119 return NULL;
3120 }
3121
57380f4e
DJ
3122 /* Make sure we don't report a SIGINT that we have already displayed
3123 for another thread. */
3124 if (lp->ignore_sigint
3125 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3126 {
3127 if (debug_linux_nat)
3128 fprintf_unfiltered (gdb_stdlog,
3129 "LLW: Delayed SIGINT caught for %s.\n",
3130 target_pid_to_str (lp->ptid));
3131
3132 /* This is a delayed SIGINT. */
3133 lp->ignore_sigint = 0;
3134
3135 registers_changed ();
28439f5e 3136 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
57380f4e
DJ
3137 lp->step, TARGET_SIGNAL_0);
3138 if (debug_linux_nat)
3139 fprintf_unfiltered (gdb_stdlog,
3140 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3141 lp->step ?
3142 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3143 target_pid_to_str (lp->ptid));
3144
3145 lp->stopped = 0;
3146 gdb_assert (lp->resumed);
3147
3148 /* Discard the event. */
3149 return NULL;
3150 }
3151
02f3fc28
PA
3152 /* An interesting event. */
3153 gdb_assert (lp);
ca2163eb 3154 lp->status = status;
02f3fc28
PA
3155 return lp;
3156}
3157
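/* Illustrative sketch, not part of the original file: the three
   wait-status classes that linux_nat_filter_event and linux_nat_wait_1
   keep distinguishing above.  For a status returned by waitpid on a
   traced LWP, exactly one of WIFSTOPPED, WIFEXITED and WIFSIGNALED
   holds.  example_classify_wait_status is a hypothetical name.  */
static const char *
example_classify_wait_status (int status)
{
  if (WIFSTOPPED (status))
    return "stopped";		/* WSTOPSIG is the stopping signal.  */
  else if (WIFEXITED (status))
    return "exited";		/* WEXITSTATUS is the exit code.  */
  else if (WIFSIGNALED (status))
    return "killed";		/* WTERMSIG is the fatal signal.  */
  else
    return "unknown";
}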
d6b0e80f 3158static ptid_t
7feb7d06 3159linux_nat_wait_1 (struct target_ops *ops,
47608cb1
PA
3160 ptid_t ptid, struct target_waitstatus *ourstatus,
3161 int target_options)
d6b0e80f 3162{
7feb7d06 3163 static sigset_t prev_mask;
d6b0e80f
AC
3164 struct lwp_info *lp = NULL;
3165 int options = 0;
3166 int status = 0;
d90e17a7 3167 pid_t pid;
d6b0e80f 3168
b84876c2
PA
3169 if (debug_linux_nat_async)
3170 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3171
f973ed9c
DJ
3172 /* The first time we get here after starting a new inferior, we may
3173 not have added it to the LWP list yet - this is the earliest
3174 moment at which we know its PID. */
d90e17a7 3175 if (ptid_is_pid (inferior_ptid))
f973ed9c 3176 {
27c9d204
PA
3177 /* Upgrade the main thread's ptid. */
3178 thread_change_ptid (inferior_ptid,
3179 BUILD_LWP (GET_PID (inferior_ptid),
3180 GET_PID (inferior_ptid)));
3181
f973ed9c
DJ
3182 lp = add_lwp (inferior_ptid);
3183 lp->resumed = 1;
3184 }
3185
7feb7d06
PA
3186 /* Make sure SIGCHLD is blocked. */
3187 block_child_signals (&prev_mask);
d6b0e80f 3188
d90e17a7
PA
3189 if (ptid_equal (ptid, minus_one_ptid))
3190 pid = -1;
3191 else if (ptid_is_pid (ptid))
3192 /* A request to wait for a specific tgid. This is not possible
3193 with waitpid, so instead, we wait for any child, and leave
3194 children we're not interested in right now with a pending
3195 status to report later. */
3196 pid = -1;
3197 else
3198 pid = GET_LWP (ptid);
3199
d6b0e80f 3200retry:
d90e17a7
PA
3201 lp = NULL;
3202 status = 0;
d6b0e80f 3203
f973ed9c 3204 /* Make sure there is at least one LWP that has been resumed. */
d90e17a7 3205 gdb_assert (iterate_over_lwps (ptid, resumed_callback, NULL));
d6b0e80f
AC
3206
3207 /* First check if there is a LWP with a wait status pending. */
3208 if (pid == -1)
3209 {
3210 /* Any LWP that's been resumed will do. */
d90e17a7 3211 lp = iterate_over_lwps (ptid, status_callback, NULL);
d6b0e80f
AC
3212 if (lp)
3213 {
ca2163eb 3214 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3215 fprintf_unfiltered (gdb_stdlog,
3216 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3217 status_to_str (lp->status),
d6b0e80f
AC
3218 target_pid_to_str (lp->ptid));
3219 }
3220
b84876c2 3221 /* But if we don't find one, we'll have to wait, and check both
7feb7d06
PA
3222 cloned and uncloned processes. We start with the cloned
3223 processes. */
d6b0e80f
AC
3224 options = __WCLONE | WNOHANG;
3225 }
3226 else if (is_lwp (ptid))
3227 {
3228 if (debug_linux_nat)
3229 fprintf_unfiltered (gdb_stdlog,
3230 "LLW: Waiting for specific LWP %s.\n",
3231 target_pid_to_str (ptid));
3232
3233 /* We have a specific LWP to check. */
3234 lp = find_lwp_pid (ptid);
3235 gdb_assert (lp);
d6b0e80f 3236
ca2163eb 3237 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3238 fprintf_unfiltered (gdb_stdlog,
3239 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3240 status_to_str (lp->status),
d6b0e80f
AC
3241 target_pid_to_str (lp->ptid));
3242
3243 /* If we have to wait, take into account whether PID is a cloned
3244 process or not. And we have to convert it to something that
3245 the layer beneath us can understand. */
3246 options = lp->cloned ? __WCLONE : 0;
3247 pid = GET_LWP (ptid);
d90e17a7
PA
3248
3249 /* We check for lp->waitstatus in addition to lp->status,
3250 because we can have pending process exits recorded in
3251 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3252 an additional lp->status_p flag. */
ca2163eb 3253 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
d90e17a7 3254 lp = NULL;
d6b0e80f
AC
3255 }
3256
d90e17a7 3257 if (lp && lp->signalled)
d6b0e80f
AC
3258 {
3259 /* A pending SIGSTOP may interfere with the normal stream of
3260 events. In a typical case where interference is a problem,
3261 we have a SIGSTOP signal pending for LWP A while
3262 single-stepping it, encounter an event in LWP B, and take the
3263 pending SIGSTOP while trying to stop LWP A. After processing
3264 the event in LWP B, LWP A is continued, and we'll never see
3265 the SIGTRAP associated with the last time we were
3266 single-stepping LWP A. */
3267
3268 /* Resume the thread. It should halt immediately returning the
3269 pending SIGSTOP. */
3270 registers_changed ();
28439f5e 3271 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3272 lp->step, TARGET_SIGNAL_0);
d6b0e80f
AC
3273 if (debug_linux_nat)
3274 fprintf_unfiltered (gdb_stdlog,
3275 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
3276 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3277 target_pid_to_str (lp->ptid));
3278 lp->stopped = 0;
3279 gdb_assert (lp->resumed);
3280
ca2163eb
PA
3281 /* Catch the pending SIGSTOP. */
3282 status = lp->status;
3283 lp->status = 0;
3284
d6b0e80f 3285 stop_wait_callback (lp, NULL);
ca2163eb
PA
3286
3287 /* If the lp->status field isn't empty, we caught another signal
3288 while flushing the SIGSTOP. Return it back to the event
3289 queue of the LWP, as we already have an event to handle. */
3290 if (lp->status)
3291 {
3292 if (debug_linux_nat)
3293 fprintf_unfiltered (gdb_stdlog,
3294 "LLW: kill %s, %s\n",
3295 target_pid_to_str (lp->ptid),
3296 status_to_str (lp->status));
3297 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
3298 }
3299
3300 lp->status = status;
d6b0e80f
AC
3301 }
3302
b84876c2
PA
3303 if (!target_can_async_p ())
3304 {
3305 /* Causes SIGINT to be passed on to the attached process. */
3306 set_sigint_trap ();
b84876c2 3307 }
d6b0e80f 3308
47608cb1
PA
3309 /* Translate generic target_wait options into waitpid options. */
3310 if (target_options & TARGET_WNOHANG)
3311 options |= WNOHANG;
7feb7d06 3312
d90e17a7 3313 while (lp == NULL)
d6b0e80f
AC
3314 {
3315 pid_t lwpid;
3316
7feb7d06 3317 lwpid = my_waitpid (pid, &status, options);
b84876c2 3318
d6b0e80f
AC
3319 if (lwpid > 0)
3320 {
3321 gdb_assert (pid == -1 || lwpid == pid);
3322
3323 if (debug_linux_nat)
3324 {
3325 fprintf_unfiltered (gdb_stdlog,
3326 "LLW: waitpid %ld received %s\n",
3327 (long) lwpid, status_to_str (status));
3328 }
3329
02f3fc28 3330 lp = linux_nat_filter_event (lwpid, status, options);
d90e17a7
PA
3331
3332 if (lp
3333 && ptid_is_pid (ptid)
3334 && ptid_get_pid (lp->ptid) != ptid_get_pid (ptid))
d6b0e80f 3335 {
d90e17a7
PA
3336 if (debug_linux_nat)
3337 fprintf (stderr, "LWP %ld got an event %06x, leaving pending.\n",
3338 ptid_get_lwp (lp->ptid), status);
3339
ca2163eb 3340 if (WIFSTOPPED (lp->status))
d90e17a7 3341 {
ca2163eb 3342 if (WSTOPSIG (lp->status) != SIGSTOP)
d90e17a7 3343 {
d90e17a7
PA
3344 stop_callback (lp, NULL);
3345
3346 /* Resume in order to collect the sigstop. */
3347 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
3348
3349 stop_wait_callback (lp, NULL);
3350 }
3351 else
3352 {
3353 lp->stopped = 1;
3354 lp->signalled = 0;
3355 }
3356 }
3357 else if (WIFEXITED (status) || WIFSIGNALED (status))
3358 {
3359 if (debug_linux_nat)
3360 fprintf (stderr, "Process %ld exited while stopping LWPs\n",
3361 ptid_get_lwp (lp->ptid));
3362
3363 /* This was the last LWP in the process. Events
3364 are serialized to the GDB core and we can't
3365 report this one right now, but the GDB core and
3366 the other target layers will still want to be
3367 notified about the exit code/signal, so leave
3368 the status pending for the next time we're able
3369 to report it. */
d90e17a7
PA
3370
3371 /* Prevent trying to stop this thread again. We'll
3372 never try to resume it because it has a pending
3373 status. */
3374 lp->stopped = 1;
3375
3376 /* Dead LWPs aren't expected to report a pending
3377 SIGSTOP. */
3378 lp->signalled = 0;
3379
3380 /* Store the pending event in the waitstatus as
3381 well, because W_EXITCODE(0,0) == 0. */
ca2163eb 3382 store_waitstatus (&lp->waitstatus, lp->status);
d90e17a7
PA
3383 }
3384
3385 /* Keep looking. */
3386 lp = NULL;
d6b0e80f
AC
3387 continue;
3388 }
3389
d90e17a7
PA
3390 if (lp)
3391 break;
3392 else
3393 {
3394 if (pid == -1)
3395 {
3396 /* waitpid did return something. Restart over. */
3397 options |= __WCLONE;
3398 }
3399 continue;
3400 }
d6b0e80f
AC
3401 }
3402
3403 if (pid == -1)
3404 {
3405 /* Alternate between checking cloned and uncloned processes. */
3406 options ^= __WCLONE;
3407
b84876c2
PA
3408 /* And every time we have checked both:
3409 In async mode, return to event loop;
3410 In sync mode, suspend waiting for a SIGCHLD signal. */
d6b0e80f 3411 if (options & __WCLONE)
b84876c2 3412 {
47608cb1 3413 if (target_options & TARGET_WNOHANG)
b84876c2
PA
3414 {
3415 /* No interesting event. */
3416 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3417
b84876c2
PA
3418 if (debug_linux_nat_async)
3419 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3420
7feb7d06 3421 restore_child_signals_mask (&prev_mask);
b84876c2
PA
3422 return minus_one_ptid;
3423 }
3424
3425 sigsuspend (&suspend_mask);
3426 }
d6b0e80f 3427 }
28736962
PA
3428 else if (target_options & TARGET_WNOHANG)
3429 {
3430 /* No interesting event for PID yet. */
3431 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3432
3433 if (debug_linux_nat_async)
3434 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3435
3436 restore_child_signals_mask (&prev_mask);
3437 return minus_one_ptid;
3438 }
d6b0e80f
AC
3439
3440 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3441 gdb_assert (lp == NULL);
d6b0e80f
AC
3442 }
3443
b84876c2 3444 if (!target_can_async_p ())
d26b5354 3445 clear_sigint_trap ();
d6b0e80f
AC
3446
3447 gdb_assert (lp);
3448
ca2163eb
PA
3449 status = lp->status;
3450 lp->status = 0;
3451
d6b0e80f
AC
3452 /* Don't report signals that GDB isn't interested in, such as
3453 signals that are neither printed nor stopped upon. Stopping all
3454 threads can be a bit time-consuming so if we want decent
3455 performance with heavily multi-threaded programs, especially when
3456 they're using a high frequency timer, we'd better avoid it if we
3457 can. */
3458
3459 if (WIFSTOPPED (status))
3460 {
3461 int signo = target_signal_from_host (WSTOPSIG (status));
d6b48e9c
PA
3462 struct inferior *inf;
3463
3464 inf = find_inferior_pid (ptid_get_pid (lp->ptid));
3465 gdb_assert (inf);
d6b0e80f 3466
d6b48e9c
PA
3467 /* Defer to common code if we get a signal while
3468 single-stepping, since that may need special care, e.g. to
3469 skip the signal handler, or, if we're gaining control of the
3470 inferior. */
d539ed7e 3471 if (!lp->step
d6b48e9c 3472 && inf->stop_soon == NO_STOP_QUIETLY
d539ed7e 3473 && signal_stop_state (signo) == 0
d6b0e80f
AC
3474 && signal_print_state (signo) == 0
3475 && signal_pass_state (signo) == 1)
3476 {
3477 /* FIXME: kettenis/2001-06-06: Should we resume all threads
3478 here? It is not clear we should. GDB may not expect
3479 other threads to run. On the other hand, not resuming
3480 newly attached threads may cause an unwanted delay in
3481 getting them running. */
3482 registers_changed ();
28439f5e 3483 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3484 lp->step, signo);
d6b0e80f
AC
3485 if (debug_linux_nat)
3486 fprintf_unfiltered (gdb_stdlog,
3487 "LLW: %s %s, %s (preempt 'handle')\n",
3488 lp->step ?
3489 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3490 target_pid_to_str (lp->ptid),
3491 signo ? strsignal (signo) : "0");
3492 lp->stopped = 0;
d6b0e80f
AC
3493 goto retry;
3494 }
3495
1ad15515 3496 if (!non_stop)
d6b0e80f 3497 {
1ad15515
PA
3498 /* Only do the below in all-stop, as we currently use SIGINT
3499 to implement target_stop (see linux_nat_stop) in
3500 non-stop. */
3501 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
3502 {
3503 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3504 forwarded to the entire process group, that is, all LWPs
3505 will receive it - unless they're using CLONE_THREAD to
3506 share signals. Since we only want to report it once, we
3507 mark it as ignored for all LWPs except this one. */
d90e17a7
PA
3508 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3509 set_ignore_sigint, NULL);
1ad15515
PA
3510 lp->ignore_sigint = 0;
3511 }
3512 else
3513 maybe_clear_ignore_sigint (lp);
d6b0e80f
AC
3514 }
3515 }
3516
3517 /* This LWP is stopped now. */
3518 lp->stopped = 1;
3519
3520 if (debug_linux_nat)
3521 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3522 status_to_str (status), target_pid_to_str (lp->ptid));
3523
4c28f408
PA
3524 if (!non_stop)
3525 {
3526 /* Now stop all other LWP's ... */
d90e17a7 3527 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
4c28f408
PA
3528
3529 /* ... and wait until all of them have reported back that
3530 they're no longer running. */
d90e17a7 3531 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
4c28f408
PA
3532
3533 /* If we're not waiting for a specific LWP, choose an event LWP
3534 from among those that have had events. Giving equal priority
3535 to all LWPs that have had events helps prevent
3536 starvation. */
3537 if (pid == -1)
d90e17a7 3538 select_event_lwp (ptid, &lp, &status);
4c28f408 3539 }
d6b0e80f
AC
3540
3541 /* Now that we've selected our final event LWP, cancel any
3542 breakpoints in other LWPs that have hit a GDB breakpoint. See
3543 the comment in cancel_breakpoints_callback to find out why. */
d90e17a7 3544 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
d6b0e80f 3545
d6b0e80f
AC
3546 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
3547 {
d6b0e80f
AC
3548 if (debug_linux_nat)
3549 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3550 "LLW: trap ptid is %s.\n",
3551 target_pid_to_str (lp->ptid));
d6b0e80f 3552 }
d6b0e80f
AC
3553
3554 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3555 {
3556 *ourstatus = lp->waitstatus;
3557 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3558 }
3559 else
3560 store_waitstatus (ourstatus, status);
3561
b84876c2
PA
3562 if (debug_linux_nat_async)
3563 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3564
7feb7d06 3565 restore_child_signals_mask (&prev_mask);
f973ed9c 3566 return lp->ptid;
d6b0e80f
AC
3567}
3568
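/* Illustrative sketch, not part of the original file: a stripped-down,
   self-contained version of the sync-mode polling strategy used in
   linux_nat_wait_1 above.  The function name wait_for_any_child is
   made up for illustration; error handling and the per-LWP
   bookkeeping are omitted.  */
#if 0
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>

static pid_t
wait_for_any_child (int *statusp, const sigset_t *suspend_mask)
{
  /* Start with __WCLONE set so the sigsuspend below only happens
     after both flavors (cloned and non-cloned children) have been
     polled once.  */
  int options = __WCLONE | WNOHANG;

  while (1)
    {
      pid_t child = waitpid (-1, statusp, options);

      if (child > 0)
        return child;   /* Got an event.  */

      /* Nothing yet; flip between cloned and non-cloned children.  */
      options ^= __WCLONE;

      /* Once both flavors have been polled, sleep until SIGCHLD.
         SUSPEND_MASK is assumed to be the caller's mask with SIGCHLD
         unblocked; SIGCHLD must normally be blocked and a handler
         installed, as the surrounding code arranges, or the suspend
         could miss a wakeup.  */
      if (options & __WCLONE)
        sigsuspend (suspend_mask);
    }
}
#endif
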
7feb7d06
PA
3569static ptid_t
3570linux_nat_wait (struct target_ops *ops,
47608cb1
PA
3571 ptid_t ptid, struct target_waitstatus *ourstatus,
3572 int target_options)
7feb7d06
PA
3573{
3574 ptid_t event_ptid;
3575
3576 if (debug_linux_nat)
3577 fprintf_unfiltered (gdb_stdlog, "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
3578
3579 /* Flush the async file first. */
3580 if (target_can_async_p ())
3581 async_file_flush ();
3582
47608cb1 3583 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
7feb7d06
PA
3584
3585 /* If we requested any event, and something came out, assume there
3586 may be more. If we requested a specific lwp or process, also
3587 assume there may be more. */
3588 if (target_can_async_p ()
3589 && (ourstatus->kind != TARGET_WAITKIND_IGNORE
3590 || !ptid_equal (ptid, minus_one_ptid)))
3591 async_file_mark ();
3592
3593 /* Get ready for the next event. */
3594 if (target_can_async_p ())
3595 target_async (inferior_event_handler, 0);
3596
3597 return event_ptid;
3598}
3599
d6b0e80f
AC
3600static int
3601kill_callback (struct lwp_info *lp, void *data)
3602{
3603 errno = 0;
3604 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
3605 if (debug_linux_nat)
3606 fprintf_unfiltered (gdb_stdlog,
3607 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3608 target_pid_to_str (lp->ptid),
3609 errno ? safe_strerror (errno) : "OK");
3610
3611 return 0;
3612}
3613
3614static int
3615kill_wait_callback (struct lwp_info *lp, void *data)
3616{
3617 pid_t pid;
3618
3619 /* We must make sure that there are no pending events (delayed
3620 SIGSTOPs, pending SIGTRAPs, etc.) so that the current
3621 program doesn't interfere with any following debugging session. */
3622
3623 /* For cloned processes we must check both with __WCLONE and
3624 without, since the exit status of a cloned process isn't reported
3625 with __WCLONE. */
3626 if (lp->cloned)
3627 {
3628 do
3629 {
58aecb61 3630 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
e85a822c 3631 if (pid != (pid_t) -1)
d6b0e80f 3632 {
e85a822c
DJ
3633 if (debug_linux_nat)
3634 fprintf_unfiltered (gdb_stdlog,
3635 "KWC: wait %s received unknown.\n",
3636 target_pid_to_str (lp->ptid));
3637 /* The Linux kernel sometimes fails to kill a thread
3638 completely after PTRACE_KILL; that goes from the stop
3639 point in do_fork out to the one in
3640 get_signal_to_deliver and waits again. So kill it
3641 again. */
3642 kill_callback (lp, NULL);
d6b0e80f
AC
3643 }
3644 }
3645 while (pid == GET_LWP (lp->ptid));
3646
3647 gdb_assert (pid == -1 && errno == ECHILD);
3648 }
3649
3650 do
3651 {
58aecb61 3652 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
e85a822c 3653 if (pid != (pid_t) -1)
d6b0e80f 3654 {
e85a822c
DJ
3655 if (debug_linux_nat)
3656 fprintf_unfiltered (gdb_stdlog,
3657 "KWC: wait %s received unk.\n",
3658 target_pid_to_str (lp->ptid));
3659 /* See the call to kill_callback above. */
3660 kill_callback (lp, NULL);
d6b0e80f
AC
3661 }
3662 }
3663 while (pid == GET_LWP (lp->ptid));
3664
3665 gdb_assert (pid == -1 && errno == ECHILD);
3666 return 0;
3667}
3668
3669static void
7d85a9c0 3670linux_nat_kill (struct target_ops *ops)
d6b0e80f 3671{
f973ed9c
DJ
3672 struct target_waitstatus last;
3673 ptid_t last_ptid;
3674 int status;
d6b0e80f 3675
f973ed9c
DJ
3676 /* If we're stopped while forking and we haven't followed yet,
3677 kill the other task. We need to do this first because the
3678 parent will be sleeping if this is a vfork. */
d6b0e80f 3679
f973ed9c 3680 get_last_target_status (&last_ptid, &last);
d6b0e80f 3681
f973ed9c
DJ
3682 if (last.kind == TARGET_WAITKIND_FORKED
3683 || last.kind == TARGET_WAITKIND_VFORKED)
3684 {
3a3e9ee3 3685 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
f973ed9c
DJ
3686 wait (&status);
3687 }
3688
3689 if (forks_exist_p ())
7feb7d06 3690 linux_fork_killall ();
f973ed9c
DJ
3691 else
3692 {
d90e17a7 3693 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
4c28f408
PA
3694 /* Stop all threads before killing them, since ptrace requires
3695 that the thread is stopped to successfully PTRACE_KILL. */
d90e17a7 3696 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
3697 /* ... and wait until all of them have reported back that
3698 they're no longer running. */
d90e17a7 3699 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 3700
f973ed9c 3701 /* Kill all LWP's ... */
d90e17a7 3702 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
3703
3704 /* ... and wait until we've flushed all events. */
d90e17a7 3705 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
3706 }
3707
3708 target_mourn_inferior ();
d6b0e80f
AC
3709}
3710
3711static void
136d6dae 3712linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 3713{
d90e17a7 3714 purge_lwp_list (ptid_get_pid (inferior_ptid));
d6b0e80f 3715
f973ed9c 3716 if (! forks_exist_p ())
d90e17a7
PA
3717 /* Normal case, no other forks available. */
3718 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
3719 else
3720 /* Multi-fork case. The current inferior_ptid has exited, but
3721 there are other viable forks to debug. Delete the exiting
3722 one and context-switch to the first available. */
3723 linux_fork_mourn_inferior ();
d6b0e80f
AC
3724}
3725
5b009018
PA
3726/* Convert a native/host siginfo object, into/from the siginfo in the
3727 layout of the inferiors' architecture. */
3728
3729static void
3730siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction)
3731{
3732 int done = 0;
3733
3734 if (linux_nat_siginfo_fixup != NULL)
3735 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3736
3737 /* If there was no callback, or the callback didn't do anything,
3738 then just do a straight memcpy. */
3739 if (!done)
3740 {
3741 if (direction == 1)
3742 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3743 else
3744 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3745 }
3746}
3747
4aa995e1
PA
3748static LONGEST
3749linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
3750 const char *annex, gdb_byte *readbuf,
3751 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3752{
4aa995e1
PA
3753 int pid;
3754 struct siginfo siginfo;
5b009018 3755 gdb_byte inf_siginfo[sizeof (struct siginfo)];
4aa995e1
PA
3756
3757 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3758 gdb_assert (readbuf || writebuf);
3759
3760 pid = GET_LWP (inferior_ptid);
3761 if (pid == 0)
3762 pid = GET_PID (inferior_ptid);
3763
3764 if (offset > sizeof (siginfo))
3765 return -1;
3766
3767 errno = 0;
3768 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3769 if (errno != 0)
3770 return -1;
3771
5b009018
PA
3772 /* When GDB is built as a 64-bit application, ptrace writes into
3773 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3774 inferior with a 64-bit GDB should look the same as debugging it
3775 with a 32-bit GDB, we need to convert it. GDB core always sees
3776 the converted layout, so any read/write will have to be done
3777 post-conversion. */
3778 siginfo_fixup (&siginfo, inf_siginfo, 0);
3779
4aa995e1
PA
3780 if (offset + len > sizeof (siginfo))
3781 len = sizeof (siginfo) - offset;
3782
3783 if (readbuf != NULL)
5b009018 3784 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
3785 else
3786 {
5b009018
PA
3787 memcpy (inf_siginfo + offset, writebuf, len);
3788
3789 /* Convert back to ptrace layout before flushing it out. */
3790 siginfo_fixup (&siginfo, inf_siginfo, 1);
3791
4aa995e1
PA
3792 errno = 0;
3793 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3794 if (errno != 0)
3795 return -1;
3796 }
3797
3798 return len;
3799}
3800
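/* Illustrative sketch, not part of the original file: the core of the
   siginfo transfer above reduced to one PTRACE_GETSIGINFO call plus
   the same offset/length clamping.  read_siginfo_bytes is a made-up
   name; PTRACE_GETSIGINFO is Linux-specific and the target thread
   must already be ptrace-stopped.  */
#if 0
#include <signal.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static long
read_siginfo_bytes (pid_t tid, void *buf, size_t offset, size_t len)
{
  siginfo_t si;

  if (offset > sizeof (si))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, tid, (void *) 0, &si) == -1)
    return -1;

  /* Clamp the request to the host siginfo size, as above.  */
  if (offset + len > sizeof (si))
    len = sizeof (si) - offset;

  memcpy (buf, (char *) &si + offset, len);
  return (long) len;
}
#endif
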
10d6c8cd
DJ
3801static LONGEST
3802linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3803 const char *annex, gdb_byte *readbuf,
3804 const gdb_byte *writebuf,
3805 ULONGEST offset, LONGEST len)
d6b0e80f 3806{
4aa995e1 3807 struct cleanup *old_chain;
10d6c8cd 3808 LONGEST xfer;
d6b0e80f 3809
4aa995e1
PA
3810 if (object == TARGET_OBJECT_SIGNAL_INFO)
3811 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
3812 offset, len);
3813
c35b1492
PA
3814 /* The target is connected but no live inferior is selected. Pass
3815 this request down to a lower stratum (e.g., the executable
3816 file). */
3817 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
3818 return 0;
3819
4aa995e1
PA
3820 old_chain = save_inferior_ptid ();
3821
d6b0e80f
AC
3822 if (is_lwp (inferior_ptid))
3823 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
3824
10d6c8cd
DJ
3825 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
3826 offset, len);
d6b0e80f
AC
3827
3828 do_cleanups (old_chain);
3829 return xfer;
3830}
3831
3832static int
28439f5e 3833linux_thread_alive (ptid_t ptid)
d6b0e80f 3834{
4c28f408
PA
3835 int err;
3836
d6b0e80f
AC
3837 gdb_assert (is_lwp (ptid));
3838
4c28f408
PA
3839 /* Send signal 0 instead of anything ptrace, because ptracing a
3840 running thread errors out claiming that the thread doesn't
3841 exist. */
3842 err = kill_lwp (GET_LWP (ptid), 0);
3843
d6b0e80f
AC
3844 if (debug_linux_nat)
3845 fprintf_unfiltered (gdb_stdlog,
4c28f408 3846 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 3847 target_pid_to_str (ptid),
4c28f408 3848 err ? safe_strerror (err) : "OK");
9c0dd46b 3849
4c28f408 3850 if (err != 0)
d6b0e80f
AC
3851 return 0;
3852
3853 return 1;
3854}
3855
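/* Illustrative sketch, not part of the original file: probing a thread
   with signal 0, which is what linux_thread_alive does through
   kill_lwp above.  thread_exists is a made-up name, and SYS_tgkill is
   assumed to be available (Linux 2.6 and later).  */
#if 0
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int
thread_exists (pid_t pid, pid_t tid)
{
  /* Signal 0 performs the existence/permission checks without
     delivering anything; a zero return means the thread is there,
     any error (notably ESRCH) means it is gone.  */
  return syscall (SYS_tgkill, (long) pid, (long) tid, 0) == 0;
}
#endif
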
28439f5e
PA
3856static int
3857linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
3858{
3859 return linux_thread_alive (ptid);
3860}
3861
d6b0e80f 3862static char *
117de6a9 3863linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
3864{
3865 static char buf[64];
3866
a0ef4274 3867 if (is_lwp (ptid)
d90e17a7
PA
3868 && (GET_PID (ptid) != GET_LWP (ptid)
3869 || num_lwps (GET_PID (ptid)) > 1))
d6b0e80f
AC
3870 {
3871 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
3872 return buf;
3873 }
3874
3875 return normal_pid_to_str (ptid);
3876}
3877
dba24537
AC
3878/* Accepts an integer PID; Returns a string representing a file that
3879 can be opened to get the symbols for the child process. */
3880
6d8fd2b7
UW
3881static char *
3882linux_child_pid_to_exec_file (int pid)
dba24537
AC
3883{
3884 char *name1, *name2;
3885
3886 name1 = xmalloc (MAXPATHLEN);
3887 name2 = xmalloc (MAXPATHLEN);
3888 make_cleanup (xfree, name1);
3889 make_cleanup (xfree, name2);
3890 memset (name2, 0, MAXPATHLEN);
3891
3892 sprintf (name1, "/proc/%d/exe", pid);
3893 if (readlink (name1, name2, MAXPATHLEN) > 0)
3894 return name2;
3895 else
3896 return name1;
3897}
3898
3899/* Service function for corefiles and info proc. */
3900
3901static int
3902read_mapping (FILE *mapfile,
3903 long long *addr,
3904 long long *endaddr,
3905 char *permissions,
3906 long long *offset,
3907 char *device, long long *inode, char *filename)
3908{
3909 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
3910 addr, endaddr, permissions, offset, device, inode);
3911
2e14c2ea
MS
3912 filename[0] = '\0';
3913 if (ret > 0 && ret != EOF)
dba24537
AC
3914 {
3915 /* Eat everything up to EOL for the filename. This will prevent
3916 weird filenames (such as one with embedded whitespace) from
3917 confusing this code. It also makes this code more robust in
3918 respect to annotations the kernel may add after the filename.
3919
3920 Note the filename is used for informational purposes
3921 only. */
3922 ret += fscanf (mapfile, "%[^\n]\n", filename);
3923 }
2e14c2ea 3924
dba24537
AC
3925 return (ret != 0 && ret != EOF);
3926}
3927
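/* Illustrative sketch, not part of the original file: a standalone
   program that walks /proc/self/maps with the same fscanf patterns
   read_mapping uses above.  The inode field is scanned with %llx only
   to mirror that pattern; its value is not used here, and the buffer
   sizes are arbitrary.  */
#if 0
#include <stdio.h>

int
main (void)
{
  FILE *mapsfile = fopen ("/proc/self/maps", "r");
  unsigned long long addr, endaddr, offset, inode;
  char permissions[8], device[8], filename[4096];

  if (mapsfile == NULL)
    return 1;

  while (fscanf (mapsfile, "%llx-%llx %s %llx %s %llx",
                 &addr, &endaddr, permissions, &offset,
                 device, &inode) == 6)
    {
      /* Eat the rest of the line as the (optional) file name.  */
      filename[0] = '\0';
      fscanf (mapsfile, "%4095[^\n]\n", filename);

      printf ("%#llx-%#llx %s %s\n", addr, endaddr, permissions, filename);
    }

  fclose (mapsfile);
  return 0;
}
#endif
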
3928/* Fills the "to_find_memory_regions" target vector. Lists the memory
3929 regions in the inferior for a corefile. */
3930
3931static int
3932linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
3933 unsigned long,
3934 int, int, int, void *), void *obfd)
3935{
89ecc4f5 3936 int pid = PIDGET (inferior_ptid);
dba24537
AC
3937 char mapsfilename[MAXPATHLEN];
3938 FILE *mapsfile;
3939 long long addr, endaddr, size, offset, inode;
3940 char permissions[8], device[8], filename[MAXPATHLEN];
3941 int read, write, exec;
3942 int ret;
7c8a8b04 3943 struct cleanup *cleanup;
dba24537
AC
3944
3945 /* Compose the filename for the /proc memory map, and open it. */
89ecc4f5 3946 sprintf (mapsfilename, "/proc/%d/maps", pid);
dba24537 3947 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
8a3fe4f8 3948 error (_("Could not open %s."), mapsfilename);
7c8a8b04 3949 cleanup = make_cleanup_fclose (mapsfile);
dba24537
AC
3950
3951 if (info_verbose)
3952 fprintf_filtered (gdb_stdout,
3953 "Reading memory regions from %s\n", mapsfilename);
3954
3955 /* Now iterate until end-of-file. */
3956 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
3957 &offset, &device[0], &inode, &filename[0]))
3958 {
3959 size = endaddr - addr;
3960
3961 /* Get the segment's permissions. */
3962 read = (strchr (permissions, 'r') != 0);
3963 write = (strchr (permissions, 'w') != 0);
3964 exec = (strchr (permissions, 'x') != 0);
3965
3966 if (info_verbose)
3967 {
3968 fprintf_filtered (gdb_stdout,
5af949e3
UW
3969 "Save segment, %lld bytes at %s (%c%c%c)",
3970 size, paddress (target_gdbarch, addr),
dba24537
AC
3971 read ? 'r' : ' ',
3972 write ? 'w' : ' ', exec ? 'x' : ' ');
b260b6c1 3973 if (filename[0])
dba24537
AC
3974 fprintf_filtered (gdb_stdout, " for %s", filename);
3975 fprintf_filtered (gdb_stdout, "\n");
3976 }
3977
3978 /* Invoke the callback function to create the corefile
3979 segment. */
3980 func (addr, size, read, write, exec, obfd);
3981 }
7c8a8b04 3982 do_cleanups (cleanup);
dba24537
AC
3983 return 0;
3984}
3985
2020b7ab
PA
3986static int
3987find_signalled_thread (struct thread_info *info, void *data)
3988{
3989 if (info->stop_signal != TARGET_SIGNAL_0
3990 && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
3991 return 1;
3992
3993 return 0;
3994}
3995
3996static enum target_signal
3997find_stop_signal (void)
3998{
3999 struct thread_info *info =
4000 iterate_over_threads (find_signalled_thread, NULL);
4001
4002 if (info)
4003 return info->stop_signal;
4004 else
4005 return TARGET_SIGNAL_0;
4006}
4007
dba24537
AC
4008/* Records the thread's register state for the corefile note
4009 section. */
4010
4011static char *
4012linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2020b7ab
PA
4013 char *note_data, int *note_size,
4014 enum target_signal stop_signal)
dba24537
AC
4015{
4016 gdb_gregset_t gregs;
4017 gdb_fpregset_t fpregs;
dba24537 4018 unsigned long lwp = ptid_get_lwp (ptid);
c2250ad1
UW
4019 struct gdbarch *gdbarch = target_gdbarch;
4020 struct regcache *regcache = get_thread_arch_regcache (ptid, gdbarch);
4f844a66 4021 const struct regset *regset;
55e969c1 4022 int core_regset_p;
594f7785 4023 struct cleanup *old_chain;
17ea7499
CES
4024 struct core_regset_section *sect_list;
4025 char *gdb_regset;
594f7785
UW
4026
4027 old_chain = save_inferior_ptid ();
4028 inferior_ptid = ptid;
4029 target_fetch_registers (regcache, -1);
4030 do_cleanups (old_chain);
4f844a66
DM
4031
4032 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
17ea7499
CES
4033 sect_list = gdbarch_core_regset_sections (gdbarch);
4034
55e969c1
DM
4035 if (core_regset_p
4036 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
4037 sizeof (gregs))) != NULL
4038 && regset->collect_regset != NULL)
594f7785 4039 regset->collect_regset (regset, regcache, -1,
55e969c1 4040 &gregs, sizeof (gregs));
4f844a66 4041 else
594f7785 4042 fill_gregset (regcache, &gregs, -1);
4f844a66 4043
55e969c1
DM
4044 note_data = (char *) elfcore_write_prstatus (obfd,
4045 note_data,
4046 note_size,
4047 lwp,
4048 stop_signal, &gregs);
4049
17ea7499
CES
4050 /* The loop below uses the new struct core_regset_section, which stores
4051 the supported section names and sizes for the core file. Note that
4052 the PRSTATUS note needs to be treated specially. But the other notes are
4053 structurally the same, so they can benefit from the new struct. */
4054 if (core_regset_p && sect_list != NULL)
4055 while (sect_list->sect_name != NULL)
4056 {
4057 /* .reg was already handled above. */
4058 if (strcmp (sect_list->sect_name, ".reg") == 0)
4059 {
4060 sect_list++;
4061 continue;
4062 }
4063 regset = gdbarch_regset_from_core_section (gdbarch,
4064 sect_list->sect_name,
4065 sect_list->size);
4066 gdb_assert (regset && regset->collect_regset);
4067 gdb_regset = xmalloc (sect_list->size);
4068 regset->collect_regset (regset, regcache, -1,
4069 gdb_regset, sect_list->size);
4070 note_data = (char *) elfcore_write_register_note (obfd,
4071 note_data,
4072 note_size,
4073 sect_list->sect_name,
4074 gdb_regset,
4075 sect_list->size);
4076 xfree (gdb_regset);
4077 sect_list++;
4078 }
dba24537 4079
17ea7499
CES
4080 /* For architectures that do not have the struct core_regset_section
4081 implemented, we use the old method. When all the architectures have
4082 the new support, the code below should be deleted. */
4f844a66 4083 else
17ea7499
CES
4084 {
4085 if (core_regset_p
4086 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
4087 sizeof (fpregs))) != NULL
4088 && regset->collect_regset != NULL)
4089 regset->collect_regset (regset, regcache, -1,
4090 &fpregs, sizeof (fpregs));
4091 else
4092 fill_fpregset (regcache, &fpregs, -1);
4093
4094 note_data = (char *) elfcore_write_prfpreg (obfd,
4095 note_data,
4096 note_size,
4097 &fpregs, sizeof (fpregs));
4098 }
4f844a66 4099
dba24537
AC
4100 return note_data;
4101}
4102
4103struct linux_nat_corefile_thread_data
4104{
4105 bfd *obfd;
4106 char *note_data;
4107 int *note_size;
4108 int num_notes;
2020b7ab 4109 enum target_signal stop_signal;
dba24537
AC
4110};
4111
4112/* Called by gdbthread.c once per thread. Records the thread's
4113 register state for the corefile note section. */
4114
4115static int
4116linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
4117{
4118 struct linux_nat_corefile_thread_data *args = data;
dba24537 4119
dba24537
AC
4120 args->note_data = linux_nat_do_thread_registers (args->obfd,
4121 ti->ptid,
4122 args->note_data,
2020b7ab
PA
4123 args->note_size,
4124 args->stop_signal);
dba24537 4125 args->num_notes++;
56be3814 4126
dba24537
AC
4127 return 0;
4128}
4129
efcbbd14
UW
4130/* Enumerate spufs IDs for process PID. */
4131
4132static void
4133iterate_over_spus (int pid, void (*callback) (void *, int), void *data)
4134{
4135 char path[128];
4136 DIR *dir;
4137 struct dirent *entry;
4138
4139 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4140 dir = opendir (path);
4141 if (!dir)
4142 return;
4143
4144 rewinddir (dir);
4145 while ((entry = readdir (dir)) != NULL)
4146 {
4147 struct stat st;
4148 struct statfs stfs;
4149 int fd;
4150
4151 fd = atoi (entry->d_name);
4152 if (!fd)
4153 continue;
4154
4155 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4156 if (stat (path, &st) != 0)
4157 continue;
4158 if (!S_ISDIR (st.st_mode))
4159 continue;
4160
4161 if (statfs (path, &stfs) != 0)
4162 continue;
4163 if (stfs.f_type != SPUFS_MAGIC)
4164 continue;
4165
4166 callback (data, fd);
4167 }
4168
4169 closedir (dir);
4170}
4171
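/* Illustrative sketch, not part of the original file: the stat/statfs
   test used by iterate_over_spus above, factored into a predicate.
   is_spufs_context_dir is a made-up name.  */
#if 0
#include <sys/stat.h>
#include <sys/vfs.h>

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

static int
is_spufs_context_dir (const char *path)
{
  struct stat st;
  struct statfs stfs;

  /* Must be a directory...  */
  if (stat (path, &st) != 0 || !S_ISDIR (st.st_mode))
    return 0;

  /* ... living on an spufs mount.  */
  if (statfs (path, &stfs) != 0)
    return 0;

  return stfs.f_type == SPUFS_MAGIC;
}
#endif
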
4172/* Generate corefile notes for SPU contexts. */
4173
4174struct linux_spu_corefile_data
4175{
4176 bfd *obfd;
4177 char *note_data;
4178 int *note_size;
4179};
4180
4181static void
4182linux_spu_corefile_callback (void *data, int fd)
4183{
4184 struct linux_spu_corefile_data *args = data;
4185 int i;
4186
4187 static const char *spu_files[] =
4188 {
4189 "object-id",
4190 "mem",
4191 "regs",
4192 "fpcr",
4193 "lslr",
4194 "decr",
4195 "decr_status",
4196 "signal1",
4197 "signal1_type",
4198 "signal2",
4199 "signal2_type",
4200 "event_mask",
4201 "event_status",
4202 "mbox_info",
4203 "ibox_info",
4204 "wbox_info",
4205 "dma_info",
4206 "proxydma_info",
4207 };
4208
4209 for (i = 0; i < sizeof (spu_files) / sizeof (spu_files[0]); i++)
4210 {
4211 char annex[32], note_name[32];
4212 gdb_byte *spu_data;
4213 LONGEST spu_len;
4214
4215 xsnprintf (annex, sizeof annex, "%d/%s", fd, spu_files[i]);
4216 spu_len = target_read_alloc (&current_target, TARGET_OBJECT_SPU,
4217 annex, &spu_data);
4218 if (spu_len > 0)
4219 {
4220 xsnprintf (note_name, sizeof note_name, "SPU/%s", annex);
4221 args->note_data = elfcore_write_note (args->obfd, args->note_data,
4222 args->note_size, note_name,
4223 NT_SPU, spu_data, spu_len);
4224 xfree (spu_data);
4225 }
4226 }
4227}
4228
4229static char *
4230linux_spu_make_corefile_notes (bfd *obfd, char *note_data, int *note_size)
4231{
4232 struct linux_spu_corefile_data args;
4233 args.obfd = obfd;
4234 args.note_data = note_data;
4235 args.note_size = note_size;
4236
4237 iterate_over_spus (PIDGET (inferior_ptid),
4238 linux_spu_corefile_callback, &args);
4239
4240 return args.note_data;
4241}
4242
dba24537
AC
4243/* Fills the "to_make_corefile_note" target vector. Builds the note
4244 section for a corefile, and returns it in a malloc buffer. */
4245
4246static char *
4247linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4248{
4249 struct linux_nat_corefile_thread_data thread_args;
4250 struct cleanup *old_chain;
d99148ef 4251 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
dba24537 4252 char fname[16] = { '\0' };
d99148ef 4253 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
dba24537
AC
4254 char psargs[80] = { '\0' };
4255 char *note_data = NULL;
4256 ptid_t current_ptid = inferior_ptid;
d90e17a7 4257 ptid_t filter = pid_to_ptid (ptid_get_pid (inferior_ptid));
c6826062 4258 gdb_byte *auxv;
dba24537
AC
4259 int auxv_len;
4260
4261 if (get_exec_file (0))
4262 {
4263 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
4264 strncpy (psargs, get_exec_file (0), sizeof (psargs));
4265 if (get_inferior_args ())
4266 {
d99148ef
JK
4267 char *string_end;
4268 char *psargs_end = psargs + sizeof (psargs);
4269
4270 /* linux_elfcore_write_prpsinfo () handles strings that are not
4271 zero-terminated fine. */
4272 string_end = memchr (psargs, 0, sizeof (psargs));
4273 if (string_end != NULL)
4274 {
4275 *string_end++ = ' ';
4276 strncpy (string_end, get_inferior_args (),
4277 psargs_end - string_end);
4278 }
dba24537
AC
4279 }
4280 note_data = (char *) elfcore_write_prpsinfo (obfd,
4281 note_data,
4282 note_size, fname, psargs);
4283 }
4284
4285 /* Dump information for threads. */
4286 thread_args.obfd = obfd;
4287 thread_args.note_data = note_data;
4288 thread_args.note_size = note_size;
4289 thread_args.num_notes = 0;
2020b7ab 4290 thread_args.stop_signal = find_stop_signal ();
d90e17a7 4291 iterate_over_lwps (filter, linux_nat_corefile_thread_callback, &thread_args);
2020b7ab
PA
4292 gdb_assert (thread_args.num_notes != 0);
4293 note_data = thread_args.note_data;
dba24537 4294
13547ab6
DJ
4295 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
4296 NULL, &auxv);
dba24537
AC
4297 if (auxv_len > 0)
4298 {
4299 note_data = elfcore_write_note (obfd, note_data, note_size,
4300 "CORE", NT_AUXV, auxv, auxv_len);
4301 xfree (auxv);
4302 }
4303
efcbbd14
UW
4304 note_data = linux_spu_make_corefile_notes (obfd, note_data, note_size);
4305
dba24537
AC
4306 make_cleanup (xfree, note_data);
4307 return note_data;
4308}
4309
4310/* Implement the "info proc" command. */
4311
4312static void
4313linux_nat_info_proc_cmd (char *args, int from_tty)
4314{
89ecc4f5
DE
4315 /* A long is used for pid instead of an int to avoid a loss of precision
4316 compiler warning from the output of strtoul. */
4317 long pid = PIDGET (inferior_ptid);
dba24537
AC
4318 FILE *procfile;
4319 char **argv = NULL;
4320 char buffer[MAXPATHLEN];
4321 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
4322 int cmdline_f = 1;
4323 int cwd_f = 1;
4324 int exe_f = 1;
4325 int mappings_f = 0;
4326 int environ_f = 0;
4327 int status_f = 0;
4328 int stat_f = 0;
4329 int all = 0;
4330 struct stat dummy;
4331
4332 if (args)
4333 {
4334 /* Break up 'args' into an argv array. */
d1a41061
PP
4335 argv = gdb_buildargv (args);
4336 make_cleanup_freeargv (argv);
dba24537
AC
4337 }
4338 while (argv != NULL && *argv != NULL)
4339 {
4340 if (isdigit (argv[0][0]))
4341 {
4342 pid = strtoul (argv[0], NULL, 10);
4343 }
4344 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
4345 {
4346 mappings_f = 1;
4347 }
4348 else if (strcmp (argv[0], "status") == 0)
4349 {
4350 status_f = 1;
4351 }
4352 else if (strcmp (argv[0], "stat") == 0)
4353 {
4354 stat_f = 1;
4355 }
4356 else if (strcmp (argv[0], "cmd") == 0)
4357 {
4358 cmdline_f = 1;
4359 }
4360 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
4361 {
4362 exe_f = 1;
4363 }
4364 else if (strcmp (argv[0], "cwd") == 0)
4365 {
4366 cwd_f = 1;
4367 }
4368 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
4369 {
4370 all = 1;
4371 }
4372 else
4373 {
4374 /* [...] (future options here) */
4375 }
4376 argv++;
4377 }
4378 if (pid == 0)
8a3fe4f8 4379 error (_("No current process: you must name one."));
dba24537 4380
89ecc4f5 4381 sprintf (fname1, "/proc/%ld", pid);
dba24537 4382 if (stat (fname1, &dummy) != 0)
8a3fe4f8 4383 error (_("No /proc directory: '%s'"), fname1);
dba24537 4384
89ecc4f5 4385 printf_filtered (_("process %ld\n"), pid);
dba24537
AC
4386 if (cmdline_f || all)
4387 {
89ecc4f5 4388 sprintf (fname1, "/proc/%ld/cmdline", pid);
d5d6fca5 4389 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537 4390 {
7c8a8b04 4391 struct cleanup *cleanup = make_cleanup_fclose (procfile);
bf1d7d9c
JB
4392 if (fgets (buffer, sizeof (buffer), procfile))
4393 printf_filtered ("cmdline = '%s'\n", buffer);
4394 else
4395 warning (_("unable to read '%s'"), fname1);
7c8a8b04 4396 do_cleanups (cleanup);
dba24537
AC
4397 }
4398 else
8a3fe4f8 4399 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4400 }
4401 if (cwd_f || all)
4402 {
89ecc4f5 4403 sprintf (fname1, "/proc/%ld/cwd", pid);
dba24537
AC
4404 memset (fname2, 0, sizeof (fname2));
4405 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4406 printf_filtered ("cwd = '%s'\n", fname2);
4407 else
8a3fe4f8 4408 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
4409 }
4410 if (exe_f || all)
4411 {
89ecc4f5 4412 sprintf (fname1, "/proc/%ld/exe", pid);
dba24537
AC
4413 memset (fname2, 0, sizeof (fname2));
4414 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4415 printf_filtered ("exe = '%s'\n", fname2);
4416 else
8a3fe4f8 4417 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
4418 }
4419 if (mappings_f || all)
4420 {
89ecc4f5 4421 sprintf (fname1, "/proc/%ld/maps", pid);
d5d6fca5 4422 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
4423 {
4424 long long addr, endaddr, size, offset, inode;
4425 char permissions[8], device[8], filename[MAXPATHLEN];
7c8a8b04 4426 struct cleanup *cleanup;
dba24537 4427
7c8a8b04 4428 cleanup = make_cleanup_fclose (procfile);
a3f17187 4429 printf_filtered (_("Mapped address spaces:\n\n"));
a97b0ac8 4430 if (gdbarch_addr_bit (target_gdbarch) == 32)
dba24537
AC
4431 {
4432 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
4433 "Start Addr",
4434 " End Addr",
4435 " Size", " Offset", "objfile");
4436 }
4437 else
4438 {
4439 printf_filtered (" %18s %18s %10s %10s %7s\n",
4440 "Start Addr",
4441 " End Addr",
4442 " Size", " Offset", "objfile");
4443 }
4444
4445 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
4446 &offset, &device[0], &inode, &filename[0]))
4447 {
4448 size = endaddr - addr;
4449
4450 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
4451 calls here (and possibly above) should be abstracted
4452 out into their own functions? Andrew suggests using
4453 a generic local_address_string instead to print out
4454 the addresses; that makes sense to me, too. */
4455
a97b0ac8 4456 if (gdbarch_addr_bit (target_gdbarch) == 32)
dba24537
AC
4457 {
4458 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
4459 (unsigned long) addr, /* FIXME: pr_addr */
4460 (unsigned long) endaddr,
4461 (int) size,
4462 (unsigned int) offset,
4463 filename[0] ? filename : "");
4464 }
4465 else
4466 {
4467 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
4468 (unsigned long) addr, /* FIXME: pr_addr */
4469 (unsigned long) endaddr,
4470 (int) size,
4471 (unsigned int) offset,
4472 filename[0] ? filename : "");
4473 }
4474 }
4475
7c8a8b04 4476 do_cleanups (cleanup);
dba24537
AC
4477 }
4478 else
8a3fe4f8 4479 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4480 }
4481 if (status_f || all)
4482 {
89ecc4f5 4483 sprintf (fname1, "/proc/%ld/status", pid);
d5d6fca5 4484 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537 4485 {
7c8a8b04 4486 struct cleanup *cleanup = make_cleanup_fclose (procfile);
dba24537
AC
4487 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
4488 puts_filtered (buffer);
7c8a8b04 4489 do_cleanups (cleanup);
dba24537
AC
4490 }
4491 else
8a3fe4f8 4492 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4493 }
4494 if (stat_f || all)
4495 {
89ecc4f5 4496 sprintf (fname1, "/proc/%ld/stat", pid);
d5d6fca5 4497 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
4498 {
4499 int itmp;
4500 char ctmp;
a25694b4 4501 long ltmp;
7c8a8b04 4502 struct cleanup *cleanup = make_cleanup_fclose (procfile);
dba24537
AC
4503
4504 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4505 printf_filtered (_("Process: %d\n"), itmp);
a25694b4 4506 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
a3f17187 4507 printf_filtered (_("Exec file: %s\n"), buffer);
dba24537 4508 if (fscanf (procfile, "%c ", &ctmp) > 0)
a3f17187 4509 printf_filtered (_("State: %c\n"), ctmp);
dba24537 4510 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4511 printf_filtered (_("Parent process: %d\n"), itmp);
dba24537 4512 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4513 printf_filtered (_("Process group: %d\n"), itmp);
dba24537 4514 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4515 printf_filtered (_("Session id: %d\n"), itmp);
dba24537 4516 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4517 printf_filtered (_("TTY: %d\n"), itmp);
dba24537 4518 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4519 printf_filtered (_("TTY owner process group: %d\n"), itmp);
a25694b4
AS
4520 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4521 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
4522 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4523 printf_filtered (_("Minor faults (no memory page): %lu\n"),
4524 (unsigned long) ltmp);
4525 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4526 printf_filtered (_("Minor faults, children: %lu\n"),
4527 (unsigned long) ltmp);
4528 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4529 printf_filtered (_("Major faults (memory page faults): %lu\n"),
4530 (unsigned long) ltmp);
4531 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4532 printf_filtered (_("Major faults, children: %lu\n"),
4533 (unsigned long) ltmp);
4534 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4535 printf_filtered (_("utime: %ld\n"), ltmp);
4536 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4537 printf_filtered (_("stime: %ld\n"), ltmp);
4538 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4539 printf_filtered (_("utime, children: %ld\n"), ltmp);
4540 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4541 printf_filtered (_("stime, children: %ld\n"), ltmp);
4542 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4543 printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
4544 ltmp);
4545 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4546 printf_filtered (_("'nice' value: %ld\n"), ltmp);
4547 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4548 printf_filtered (_("jiffies until next timeout: %lu\n"),
4549 (unsigned long) ltmp);
4550 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4551 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
4552 (unsigned long) ltmp);
4553 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4554 printf_filtered (_("start time (jiffies since system boot): %ld\n"),
4555 ltmp);
4556 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4557 printf_filtered (_("Virtual memory size: %lu\n"),
4558 (unsigned long) ltmp);
4559 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4560 printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp);
4561 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4562 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
4563 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4564 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
4565 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4566 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
4567 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4568 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
dba24537
AC
4569#if 0 /* Don't know how architecture-dependent the rest is...
4570 Anyway the signal bitmap info is available from "status". */
a25694b4
AS
4571 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
4572 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
4573 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
4574 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
4575 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4576 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
4577 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4578 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
4579 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4580 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
4581 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4582 printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp);
4583 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
4584 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
dba24537 4585#endif
7c8a8b04 4586 do_cleanups (cleanup);
dba24537
AC
4587 }
4588 else
8a3fe4f8 4589 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4590 }
4591}
4592
10d6c8cd
DJ
4593/* Implement the to_xfer_partial interface for memory reads using the /proc
4594 filesystem. Because we can use a single read() call for /proc, this
4595 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4596 but it doesn't support writes. */
4597
4598static LONGEST
4599linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4600 const char *annex, gdb_byte *readbuf,
4601 const gdb_byte *writebuf,
4602 ULONGEST offset, LONGEST len)
dba24537 4603{
10d6c8cd
DJ
4604 LONGEST ret;
4605 int fd;
dba24537
AC
4606 char filename[64];
4607
10d6c8cd 4608 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
4609 return 0;
4610
4611 /* Don't bother for one word. */
4612 if (len < 3 * sizeof (long))
4613 return 0;
4614
4615 /* We could keep this file open and cache it - possibly one per
4616 thread. That requires some juggling, but is even faster. */
4617 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4618 fd = open (filename, O_RDONLY | O_LARGEFILE);
4619 if (fd == -1)
4620 return 0;
4621
4622 /* If pread64 is available, use it. It's faster if the kernel
4623 supports it (only one syscall), and it's 64-bit safe even on
4624 32-bit platforms (for instance, SPARC debugging a SPARC64
4625 application). */
4626#ifdef HAVE_PREAD64
10d6c8cd 4627 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 4628#else
10d6c8cd 4629 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
4630#endif
4631 ret = 0;
4632 else
4633 ret = len;
4634
4635 close (fd);
4636 return ret;
4637}
4638
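/* Illustrative sketch, not part of the original file: reading another
   process's memory through /proc/PID/mem with a single pread, which
   is the fast path linux_proc_xfer_partial relies on above.  The
   caller must already be attached to (ptracing) the target process;
   read_inferior_memory is a made-up name.  */
#if 0
#define _FILE_OFFSET_BITS 64	/* Make pread take a 64-bit offset.  */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static ssize_t
read_inferior_memory (int pid, unsigned long long addr,
                      void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t nread;

  snprintf (filename, sizeof (filename), "/proc/%d/mem", pid);
  fd = open (filename, O_RDONLY);
  if (fd == -1)
    return -1;

  nread = pread (fd, buf, len, (off_t) addr);

  close (fd);
  return nread;
}
#endif
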
efcbbd14
UW
4639
4640/* Enumerate spufs IDs for process PID. */
4641static LONGEST
4642spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
4643{
4644 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
4645 LONGEST pos = 0;
4646 LONGEST written = 0;
4647 char path[128];
4648 DIR *dir;
4649 struct dirent *entry;
4650
4651 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4652 dir = opendir (path);
4653 if (!dir)
4654 return -1;
4655
4656 rewinddir (dir);
4657 while ((entry = readdir (dir)) != NULL)
4658 {
4659 struct stat st;
4660 struct statfs stfs;
4661 int fd;
4662
4663 fd = atoi (entry->d_name);
4664 if (!fd)
4665 continue;
4666
4667 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4668 if (stat (path, &st) != 0)
4669 continue;
4670 if (!S_ISDIR (st.st_mode))
4671 continue;
4672
4673 if (statfs (path, &stfs) != 0)
4674 continue;
4675 if (stfs.f_type != SPUFS_MAGIC)
4676 continue;
4677
4678 if (pos >= offset && pos + 4 <= offset + len)
4679 {
4680 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4681 written += 4;
4682 }
4683 pos += 4;
4684 }
4685
4686 closedir (dir);
4687 return written;
4688}
4689
4690/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4691 object type, using the /proc file system. */
4692static LONGEST
4693linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4694 const char *annex, gdb_byte *readbuf,
4695 const gdb_byte *writebuf,
4696 ULONGEST offset, LONGEST len)
4697{
4698 char buf[128];
4699 int fd = 0;
4700 int ret = -1;
4701 int pid = PIDGET (inferior_ptid);
4702
4703 if (!annex)
4704 {
4705 if (!readbuf)
4706 return -1;
4707 else
4708 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4709 }
4710
4711 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4712 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4713 if (fd <= 0)
4714 return -1;
4715
4716 if (offset != 0
4717 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4718 {
4719 close (fd);
4720 return 0;
4721 }
4722
4723 if (writebuf)
4724 ret = write (fd, writebuf, (size_t) len);
4725 else if (readbuf)
4726 ret = read (fd, readbuf, (size_t) len);
4727
4728 close (fd);
4729 return ret;
4730}
4731
4732
dba24537
AC
4733/* Parse LINE as a signal set and add its set bits to SIGS. */
4734
4735static void
4736add_line_to_sigset (const char *line, sigset_t *sigs)
4737{
4738 int len = strlen (line) - 1;
4739 const char *p;
4740 int signum;
4741
4742 if (line[len] != '\n')
8a3fe4f8 4743 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4744
4745 p = line;
4746 signum = len * 4;
4747 while (len-- > 0)
4748 {
4749 int digit;
4750
4751 if (*p >= '0' && *p <= '9')
4752 digit = *p - '0';
4753 else if (*p >= 'a' && *p <= 'f')
4754 digit = *p - 'a' + 10;
4755 else
8a3fe4f8 4756 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4757
4758 signum -= 4;
4759
4760 if (digit & 1)
4761 sigaddset (sigs, signum + 1);
4762 if (digit & 2)
4763 sigaddset (sigs, signum + 2);
4764 if (digit & 4)
4765 sigaddset (sigs, signum + 3);
4766 if (digit & 8)
4767 sigaddset (sigs, signum + 4);
4768
4769 p++;
4770 }
4771}
4772
4773/* Find process PID's pending signals from /proc/pid/status and set
4774 SIGS to match. */
4775
4776void
4777linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
4778{
4779 FILE *procfile;
4780 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
4781 int signum;
7c8a8b04 4782 struct cleanup *cleanup;
dba24537
AC
4783
4784 sigemptyset (pending);
4785 sigemptyset (blocked);
4786 sigemptyset (ignored);
4787 sprintf (fname, "/proc/%d/status", pid);
4788 procfile = fopen (fname, "r");
4789 if (procfile == NULL)
8a3fe4f8 4790 error (_("Could not open %s"), fname);
7c8a8b04 4791 cleanup = make_cleanup_fclose (procfile);
dba24537
AC
4792
4793 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
4794 {
4795 /* Normal queued signals are on the SigPnd line in the status
4796 file. However, 2.6 kernels also have a "shared" pending
4797 queue for delivering signals to a thread group, so check for
4798 a ShdPnd line also.
4799
4800 Unfortunately some Red Hat kernels include the shared pending
4801 queue but not the ShdPnd status field. */
4802
4803 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4804 add_line_to_sigset (buffer + 8, pending);
4805 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4806 add_line_to_sigset (buffer + 8, pending);
4807 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4808 add_line_to_sigset (buffer + 8, blocked);
4809 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4810 add_line_to_sigset (buffer + 8, ignored);
4811 }
4812
7c8a8b04 4813 do_cleanups (cleanup);
dba24537
AC
4814}
4815
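/* Illustrative sketch, not part of the original file: a standalone
   program decoding a /proc/PID/status signal mask the same way
   add_line_to_sigset does above -- bit N-1 of the hex mask maps to
   signal N, and the rightmost hex digit covers signals 1 to 4.  */
#if 0
#include <stdio.h>
#include <string.h>

int
main (void)
{
  /* 0x102 has bits 1 and 8 set, i.e. signals 2 (SIGINT) and
     9 (SIGKILL).  */
  const char *mask = "0000000000000102";
  int len = strlen (mask);
  int i;

  for (i = 0; i < len; i++)
    {
      char c = mask[i];
      int digit = (c >= 'a') ? c - 'a' + 10
                  : (c >= 'A') ? c - 'A' + 10
                  : c - '0';
      /* Bit index of this digit's least significant bit.  */
      int base = (len - 1 - i) * 4;
      int bit;

      for (bit = 0; bit < 4; bit++)
        if (digit & (1 << bit))
          printf ("signal %d is pending\n", base + bit + 1);
    }

  return 0;
}
#endif
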
07e059b5
VP
4816static LONGEST
4817linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
4818 const char *annex, gdb_byte *readbuf,
4819 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4820{
4821 /* We make the process list snapshot when the object starts to be
4822 read. */
4823 static const char *buf;
4824 static LONGEST len_avail = -1;
4825 static struct obstack obstack;
4826
4827 DIR *dirp;
4828
4829 gdb_assert (object == TARGET_OBJECT_OSDATA);
4830
4831 if (strcmp (annex, "processes") != 0)
4832 return 0;
4833
4834 gdb_assert (readbuf && !writebuf);
4835
4836 if (offset == 0)
4837 {
4838 if (len_avail != -1 && len_avail != 0)
4839 obstack_free (&obstack, NULL);
4840 len_avail = 0;
4841 buf = NULL;
4842 obstack_init (&obstack);
4843 obstack_grow_str (&obstack, "<osdata type=\"processes\">\n");
4844
4845 dirp = opendir ("/proc");
4846 if (dirp)
4847 {
4848 struct dirent *dp;
4849 while ((dp = readdir (dirp)) != NULL)
4850 {
4851 struct stat statbuf;
4852 char procentry[sizeof ("/proc/4294967295")];
4853
4854 if (!isdigit (dp->d_name[0])
1a6d2f2f 4855 || NAMELEN (dp) > sizeof ("4294967295") - 1)
07e059b5
VP
4856 continue;
4857
4858 sprintf (procentry, "/proc/%s", dp->d_name);
4859 if (stat (procentry, &statbuf) == 0
4860 && S_ISDIR (statbuf.st_mode))
4861 {
4862 char *pathname;
4863 FILE *f;
4864 char cmd[MAXPATHLEN + 1];
4865 struct passwd *entry;
4866
4867 pathname = xstrprintf ("/proc/%s/cmdline", dp->d_name);
4868 entry = getpwuid (statbuf.st_uid);
4869
4870 if ((f = fopen (pathname, "r")) != NULL)
4871 {
4872 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
4873 if (len > 0)
4874 {
4875 int i;
4876 for (i = 0; i < len; i++)
4877 if (cmd[i] == '\0')
4878 cmd[i] = ' ';
4879 cmd[len] = '\0';
4880
4881 obstack_xml_printf (
4882 &obstack,
4883 "<item>"
4884 "<column name=\"pid\">%s</column>"
4885 "<column name=\"user\">%s</column>"
4886 "<column name=\"command\">%s</column>"
4887 "</item>",
4888 dp->d_name,
4889 entry ? entry->pw_name : "?",
4890 cmd);
4891 }
4892 fclose (f);
4893 }
4894
4895 xfree (pathname);
4896 }
4897 }
4898
4899 closedir (dirp);
4900 }
4901
4902 obstack_grow_str0 (&obstack, "</osdata>\n");
4903 buf = obstack_finish (&obstack);
4904 len_avail = strlen (buf);
4905 }
4906
4907 if (offset >= len_avail)
4908 {
4909 /* Done. Get rid of the obstack. */
4910 obstack_free (&obstack, NULL);
4911 buf = NULL;
4912 len_avail = 0;
4913 return 0;
4914 }
4915
4916 if (len > len_avail - offset)
4917 len = len_avail - offset;
4918 memcpy (readbuf, buf + offset, len);
4919
4920 return len;
4921}
4922
10d6c8cd
DJ
4923static LONGEST
4924linux_xfer_partial (struct target_ops *ops, enum target_object object,
4925 const char *annex, gdb_byte *readbuf,
4926 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4927{
4928 LONGEST xfer;
4929
4930 if (object == TARGET_OBJECT_AUXV)
4931 return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
4932 offset, len);
4933
07e059b5
VP
4934 if (object == TARGET_OBJECT_OSDATA)
4935 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
4936 offset, len);
4937
efcbbd14
UW
4938 if (object == TARGET_OBJECT_SPU)
4939 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
4940 offset, len);
4941
8f313923
JK
4942 /* GDB calculates all the addresses in the possibly larger width of the address.
4943 Address width needs to be masked before its final use - either by
4944 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4945
4946 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4947
4948 if (object == TARGET_OBJECT_MEMORY)
4949 {
4950 int addr_bit = gdbarch_addr_bit (target_gdbarch);
4951
4952 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4953 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4954 }
4955
10d6c8cd
DJ
4956 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
4957 offset, len);
4958 if (xfer != 0)
4959 return xfer;
4960
4961 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4962 offset, len);
4963}
4964
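/* Illustrative sketch, not part of the original file: a worked example
   of the address-width masking above, showing a sign-extended 32-bit
   address being truncated back to 32 bits before it reaches /proc or
   ptrace.  */
#if 0
#include <stdio.h>

int
main (void)
{
  /* 0xfffffffc sign-extended to 64 bits.  */
  unsigned long long offset = 0xfffffffffffffffcULL;
  int addr_bit = 32;

  if (addr_bit < (int) (sizeof (offset) * 8))
    offset &= ((unsigned long long) 1 << addr_bit) - 1;

  /* Prints "masked offset = 0xfffffffc".  */
  printf ("masked offset = %#llx\n", offset);
  return 0;
}
#endif
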
e9efe249 4965/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
4966 it with local methods. */
4967
910122bf
UW
4968static void
4969linux_target_install_ops (struct target_ops *t)
10d6c8cd 4970{
6d8fd2b7
UW
4971 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
4972 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
4973 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
a96d9b2e 4974 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
6d8fd2b7 4975 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 4976 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
4977 t->to_post_attach = linux_child_post_attach;
4978 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
4979 t->to_find_memory_regions = linux_nat_find_memory_regions;
4980 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
4981
4982 super_xfer_partial = t->to_xfer_partial;
4983 t->to_xfer_partial = linux_xfer_partial;
910122bf
UW
4984}
4985
4986struct target_ops *
4987linux_target (void)
4988{
4989 struct target_ops *t;
4990
4991 t = inf_ptrace_target ();
4992 linux_target_install_ops (t);
4993
4994 return t;
4995}
4996
4997struct target_ops *
7714d83a 4998linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
4999{
5000 struct target_ops *t;
5001
5002 t = inf_ptrace_trad_target (register_u_offset);
5003 linux_target_install_ops (t);
10d6c8cd 5004
10d6c8cd
DJ
5005 return t;
5006}
5007
b84876c2
PA
5008/* target_is_async_p implementation. */
5009
5010static int
5011linux_nat_is_async_p (void)
5012{
5013 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 5014 it explicitly with the "set target-async" command.
b84876c2 5015 Someday, linux will always be async. */
c6ebd6cf 5016 if (!target_async_permitted)
b84876c2
PA
5017 return 0;
5018
d90e17a7
PA
5019 /* See target.h/target_async_mask. */
5020 return linux_nat_async_mask_value;
b84876c2
PA
5021}
5022
5023/* target_can_async_p implementation. */
5024
5025static int
5026linux_nat_can_async_p (void)
5027{
5028 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 5029 it explicitly with the "set target-async" command.
b84876c2 5030 Someday, linux will always be async. */
c6ebd6cf 5031 if (!target_async_permitted)
b84876c2
PA
5032 return 0;
5033
5034 /* See target.h/target_async_mask. */
5035 return linux_nat_async_mask_value;
5036}
5037
9908b566
VP
5038static int
5039linux_nat_supports_non_stop (void)
5040{
5041 return 1;
5042}
5043
d90e17a7
PA
5044/* True if we want to support multi-process. To be removed when GDB
5045 supports multi-exec. */
5046
2277426b 5047int linux_multi_process = 1;
d90e17a7
PA
5048
5049static int
5050linux_nat_supports_multi_process (void)
5051{
5052 return linux_multi_process;
5053}
5054
b84876c2
PA
5055/* target_async_mask implementation. */
5056
5057static int
7feb7d06 5058linux_nat_async_mask (int new_mask)
b84876c2 5059{
7feb7d06 5060 int curr_mask = linux_nat_async_mask_value;
b84876c2 5061
7feb7d06 5062 if (curr_mask != new_mask)
b84876c2 5063 {
7feb7d06 5064 if (new_mask == 0)
b84876c2
PA
5065 {
5066 linux_nat_async (NULL, 0);
7feb7d06 5067 linux_nat_async_mask_value = new_mask;
b84876c2
PA
5068 }
5069 else
5070 {
7feb7d06 5071 linux_nat_async_mask_value = new_mask;
84e46146 5072
7feb7d06
PA
5073 /* If we're going out of async-mask in all-stop, then the
5074 inferior is stopped. The next resume will call
5075 target_async. In non-stop, the target event source
5076 should be always registered in the event loop. Do so
5077 now. */
5078 if (non_stop)
5079 linux_nat_async (inferior_event_handler, 0);
b84876c2
PA
5080 }
5081 }
5082
7feb7d06 5083 return curr_mask;
b84876c2
PA
5084}
5085
5086static int async_terminal_is_ours = 1;
5087
5088/* target_terminal_inferior implementation. */
5089
5090static void
5091linux_nat_terminal_inferior (void)
5092{
5093 if (!target_is_async_p ())
5094 {
5095 /* Async mode is disabled. */
5096 terminal_inferior ();
5097 return;
5098 }
5099
b84876c2
PA
5100 terminal_inferior ();
5101
d9d2d8b6 5102 /* Calls to target_terminal_*() are meant to be idempotent. */
b84876c2
PA
5103 if (!async_terminal_is_ours)
5104 return;
5105
5106 delete_file_handler (input_fd);
5107 async_terminal_is_ours = 0;
5108 set_sigint_trap ();
5109}
5110
5111/* target_terminal_ours implementation. */
5112
2c0b251b 5113static void
b84876c2
PA
5114linux_nat_terminal_ours (void)
5115{
5116 if (!target_is_async_p ())
5117 {
5118 /* Async mode is disabled. */
5119 terminal_ours ();
5120 return;
5121 }
5122
5123 /* GDB should never give the terminal to the inferior if the
5124 inferior is running in the background (run&, continue&, etc.),
5125 but claiming it sure should. */
5126 terminal_ours ();
5127
b84876c2
PA
5128 if (async_terminal_is_ours)
5129 return;
5130
5131 clear_sigint_trap ();
5132 add_file_handler (input_fd, stdin_event_handler, 0);
5133 async_terminal_is_ours = 1;
5134}
5135
5136static void (*async_client_callback) (enum inferior_event_type event_type,
5137 void *context);
5138static void *async_client_context;
5139
7feb7d06
PA
5140/* SIGCHLD handler that serves two purposes: in non-stop/async mode
5141 it lets us notice when any child changes state and notify the
5142 event loop; in sync mode it lets us use sigsuspend in
5143 linux_nat_wait_1 above to wait for the arrival of a SIGCHLD. */
5144

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog, "sigchld\n");

  if (signo == SIGCHLD
      && linux_nat_event_pipe[0] != -1)
    async_file_mark (); /* Let the event loop know that there are
                           events to handle.  */

  errno = old_errno;
}

/* Callback registered with the target events file descriptor.  */

static void
handle_target_event (int error, gdb_client_data client_data)
{
  (*async_client_callback) (INF_REG_EVENT, async_client_context);
}

/* Create/destroy the target events pipe.  Returns previous state.  */

static int
linux_async_pipe (int enable)
{
  int previous = (linux_nat_event_pipe[0] != -1);

  if (previous != enable)
    {
      sigset_t prev_mask;

      block_child_signals (&prev_mask);

      if (enable)
        {
          if (pipe (linux_nat_event_pipe) == -1)
            internal_error (__FILE__, __LINE__,
                            "creating event pipe failed.");

          fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
          fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
        }
      else
        {
          close (linux_nat_event_pipe[0]);
          close (linux_nat_event_pipe[1]);
          linux_nat_event_pipe[0] = -1;
          linux_nat_event_pipe[1] = -1;
        }

      restore_child_signals_mask (&prev_mask);
    }

  return previous;
}
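
/* Sketch (an assumption about helpers defined earlier in this file,
   shown for illustration): async_file_mark only has to make the read
   end of linux_nat_event_pipe readable, so something along the lines
   of

       write (linux_nat_event_pipe[1], "+", 1);

   is enough; the byte's value is irrelevant.  handle_target_event
   above then runs from the event loop once select/poll reports the
   read end as readable.  */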

/* target_async implementation.  */

static void
linux_nat_async (void (*callback) (enum inferior_event_type event_type,
                                   void *context), void *context)
{
  if (linux_nat_async_mask_value == 0 || !target_async_permitted)
    internal_error (__FILE__, __LINE__,
                    "Calling target_async when async is masked");

  if (callback != NULL)
    {
      async_client_callback = callback;
      async_client_context = context;
      if (!linux_async_pipe (1))
        {
          add_file_handler (linux_nat_event_pipe[0],
                            handle_target_event, NULL);
          /* There may be pending events to handle.  Tell the event loop
             to poll them.  */
          async_file_mark ();
        }
    }
  else
    {
      async_client_callback = callback;
      async_client_context = context;
      delete_file_handler (linux_nat_event_pipe[0]);
      linux_async_pipe (0);
    }
  return;
}
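
/* Caller-side sketch (illustrative): the core turns target async on
   with roughly

       if (target_can_async_p ())
         target_async (inferior_event_handler, 0);

   which reaches this function through the to_async method installed
   in linux_nat_add_target below; passing a NULL callback instead
   unregisters the event source.  */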

/* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
   event came out.  */

static int
linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
{
  if (!lwp->stopped)
    {
      int pid, status;
      ptid_t ptid = lwp->ptid;

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LNSL: running -> suspending %s\n",
                            target_pid_to_str (lwp->ptid));

      stop_callback (lwp, NULL);
      stop_wait_callback (lwp, NULL);

      /* If the lwp exits while we try to stop it, there's nothing
         else to do.  */
      lwp = find_lwp_pid (ptid);
      if (lwp == NULL)
        return 0;

      /* If we didn't collect any signal other than SIGSTOP while
         stopping the LWP, push a SIGNAL_0 event.  In either case, the
         event-loop will end up calling target_wait which will collect
         these.  */
      if (lwp->status == 0)
        lwp->status = W_STOPCODE (0);
      async_file_mark ();
    }
  else
    {
      /* Already known to be stopped; do nothing.  */

      if (debug_linux_nat)
        {
          if (find_thread_ptid (lwp->ptid)->stop_requested)
            fprintf_unfiltered (gdb_stdlog, "\
LNSL: already stopped/stop_requested %s\n",
                                target_pid_to_str (lwp->ptid));
          else
            fprintf_unfiltered (gdb_stdlog, "\
LNSL: already stopped/no stop_requested yet %s\n",
                                target_pid_to_str (lwp->ptid));
        }
    }
  return 0;
}

static void
linux_nat_stop (ptid_t ptid)
{
  if (non_stop)
    iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
  else
    linux_ops->to_stop (ptid);
}

static void
linux_nat_close (int quitting)
{
  /* Unregister from the event loop.  */
  if (target_is_async_p ())
    target_async (NULL, 0);

  /* Reset the async mask to its default (async allowed).  */
  linux_nat_async_mask_value = 1;

  if (linux_ops->to_close)
    linux_ops->to_close (quitting);
}

void
linux_nat_add_target (struct target_ops *t)
{
  /* Save the provided single-threaded target.  We save this in a separate
     variable because another target we've inherited from (e.g. inf-ptrace)
     may have saved a pointer to T; we want to use it for the final
     process stratum target.  */
  linux_ops_saved = *t;
  linux_ops = &linux_ops_saved;

  /* Override some methods for multithreading.  */
  t->to_create_inferior = linux_nat_create_inferior;
  t->to_attach = linux_nat_attach;
  t->to_detach = linux_nat_detach;
  t->to_resume = linux_nat_resume;
  t->to_wait = linux_nat_wait;
  t->to_xfer_partial = linux_nat_xfer_partial;
  t->to_kill = linux_nat_kill;
  t->to_mourn_inferior = linux_nat_mourn_inferior;
  t->to_thread_alive = linux_nat_thread_alive;
  t->to_pid_to_str = linux_nat_pid_to_str;
  t->to_has_thread_control = tc_schedlock;

  t->to_can_async_p = linux_nat_can_async_p;
  t->to_is_async_p = linux_nat_is_async_p;
  t->to_supports_non_stop = linux_nat_supports_non_stop;
  t->to_async = linux_nat_async;
  t->to_async_mask = linux_nat_async_mask;
  t->to_terminal_inferior = linux_nat_terminal_inferior;
  t->to_terminal_ours = linux_nat_terminal_ours;
  t->to_close = linux_nat_close;

  /* Methods for non-stop support.  */
  t->to_stop = linux_nat_stop;

  t->to_supports_multi_process = linux_nat_supports_multi_process;

  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will sit at thread_stratum.  This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes.  */

  add_target (t);
}
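
/* Registration sketch (illustrative; the arch_linux_* names are
   placeholders, not functions defined in this tree): an
   architecture-specific native file is expected to build its ptrace
   target, install its register access methods, and then hand the
   result to this module:

       void
       _initialize_arch_linux_nat (void)
       {
         struct target_ops *t = linux_target ();

         t->to_fetch_registers = arch_linux_fetch_inferior_registers;
         t->to_store_registers = arch_linux_store_inferior_registers;

         linux_nat_add_target (t);
         linux_nat_set_new_thread (t, arch_linux_new_thread);
       }

   linux_target is assumed to be the linux-nat.h helper that creates
   the base inf-ptrace target; registering a new-thread hook (see
   linux_nat_set_new_thread below) is optional.  */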

/* Register a method to call whenever a new thread is attached.  */
void
linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
{
  /* Save the pointer.  We only support a single registered instance
     of the GNU/Linux native target, so we do not need to map this to
     T.  */
  linux_nat_new_thread = new_thread;
}

/* Register a method that converts a siginfo object between the layout
   that ptrace returns, and the layout in the architecture of the
   inferior.  */
void
linux_nat_set_siginfo_fixup (struct target_ops *t,
                             int (*siginfo_fixup) (struct siginfo *,
                                                   gdb_byte *,
                                                   int))
{
  /* Save the pointer.  */
  linux_nat_siginfo_fixup = siginfo_fixup;
}

/* Return the saved siginfo associated with PTID.  */
struct siginfo *
linux_nat_get_siginfo (ptid_t ptid)
{
  struct lwp_info *lp = find_lwp_pid (ptid);

  gdb_assert (lp != NULL);

  return &lp->siginfo;
}
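
/* Consumption sketch (illustrative, not tied to any particular
   backend in this tree): an architecture's hardware-watchpoint code
   can recover the faulting address from the saved siginfo, e.g.

       struct siginfo *si = linux_nat_get_siginfo (inferior_ptid);

       if (si->si_signo == SIGTRAP)
         addr = (CORE_ADDR) (uintptr_t) si->si_addr;

   where addr is a CORE_ADDR variable of the caller's.  */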

/* Provide a prototype to silence -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_linux_nat;

void
_initialize_linux_nat (void)
{
  sigset_t mask;

  add_info ("proc", linux_nat_info_proc_cmd, _("\
Show /proc process information about any running process.\n\
Specify any process id, or use the program being debugged by default.\n\
Specify any of the following keywords for detailed info:\n\
  mappings -- list of mapped memory regions.\n\
  stat -- list a bunch of random process info.\n\
  status -- list a different bunch of random process info.\n\
  all -- list all available /proc info."));

  add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
                            &debug_linux_nat, _("\
Set debugging of GNU/Linux lwp module."), _("\
Show debugging of GNU/Linux lwp module."), _("\
Enables printf debugging output."),
                            NULL,
                            show_debug_linux_nat,
                            &setdebuglist, &showdebuglist);

  add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance,
                            &debug_linux_nat_async, _("\
Set debugging of GNU/Linux async lwp module."), _("\
Show debugging of GNU/Linux async lwp module."), _("\
Enables printf debugging output."),
                            NULL,
                            show_debug_linux_nat_async,
                            &setdebuglist, &showdebuglist);

  /* Save this mask as the default.  */
  sigprocmask (SIG_SETMASK, NULL, &normal_mask);

  /* Install a SIGCHLD handler.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;

  /* Make it the default.  */
  sigaction (SIGCHLD, &sigchld_action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  sigemptyset (&blocked_mask);

  add_setshow_boolean_cmd ("disable-randomization", class_support,
                           &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
                           &set_disable_randomization,
                           &show_disable_randomization,
                           &setlist, &showlist);
}
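
/* Typical use of the commands registered above, from the GDB prompt
   (illustrative session, matching the add_info/add_setshow_* calls):

       (gdb) info proc mappings
       (gdb) set debug lin-lwp 1
       (gdb) set debug lin-lwp-async 1
       (gdb) show disable-randomization
*/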


/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
   the GNU/Linux Threads library and therefore doesn't really belong
   here.  */

/* Read variable NAME in the target and return its value if found.
   Otherwise return zero.  It is assumed that the type of the variable
   is `int'.  */

static int
get_signo (const char *name)
{
  struct minimal_symbol *ms;
  int signo;

  ms = lookup_minimal_symbol (name, NULL, NULL);
  if (ms == NULL)
    return 0;

  if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
                          sizeof (signo)) != 0)
    return 0;

  return signo;
}

/* Return the set of signals used by the threads library in *SET.  */

void
lin_thread_get_thread_signals (sigset_t *set)
{
  struct sigaction action;
  int restart, cancel;

  sigemptyset (&blocked_mask);
  sigemptyset (set);

  restart = get_signo ("__pthread_sig_restart");
  cancel = get_signo ("__pthread_sig_cancel");

  /* LinuxThreads normally uses the first two RT signals, but in some legacy
     cases may use SIGUSR1/SIGUSR2.  NPTL always uses RT signals, but does
     not provide any way for the debugger to query the signal numbers -
     fortunately they don't change!  */

  if (restart == 0)
    restart = __SIGRTMIN;

  if (cancel == 0)
    cancel = __SIGRTMIN + 1;

  sigaddset (set, restart);
  sigaddset (set, cancel);

  /* The GNU/Linux Threads library makes terminating threads send a
     special "cancel" signal instead of SIGCHLD.  Make sure we catch
     those (to prevent them from terminating GDB itself, which is
     likely to be their default action) and treat them the same way as
     SIGCHLD.  */

  action.sa_handler = sigchld_handler;
  sigemptyset (&action.sa_mask);
  action.sa_flags = SA_RESTART;
  sigaction (cancel, &action, NULL);

  /* We block the "cancel" signal throughout this code ...  */
  sigaddset (&blocked_mask, cancel);
  sigprocmask (SIG_BLOCK, &blocked_mask, NULL);

  /* ... except during a sigsuspend.  */
  sigdelset (&suspend_mask, cancel);
}
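
/* Caller sketch (an assumption about the thread_db layer, shown for
   illustration): the thread support code is expected to fetch this
   set once and adjust GDB's signal handling tables for each member,
   roughly:

       sigset_t mask;
       int i;

       lin_thread_get_thread_signals (&mask);
       for (i = 1; i < NSIG; i++)
         if (sigismember (&mask, i))
           {
             signal_stop_update (target_signal_from_host (i), 0);
             signal_print_update (target_signal_from_host (i), 0);
           }

   so that the restart/cancel signals neither stop nor announce
   themselves during normal stepping.  */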