]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/linux-nat.c
Add lost bit of previous commit.
[thirdparty/binutils-gdb.git] / gdb / linux-nat.c
CommitLineData
3993f6b1 1/* GNU/Linux native-dependent code common to multiple platforms.
dba24537 2
9b254dd1 3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
e26af52f 4 Free Software Foundation, Inc.
3993f6b1
DJ
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
a9762ec7 10 the Free Software Foundation; either version 3 of the License, or
3993f6b1
DJ
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
a9762ec7 19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
3993f6b1
DJ
20
21#include "defs.h"
22#include "inferior.h"
23#include "target.h"
d6b0e80f 24#include "gdb_string.h"
3993f6b1 25#include "gdb_wait.h"
d6b0e80f
AC
26#include "gdb_assert.h"
27#ifdef HAVE_TKILL_SYSCALL
28#include <unistd.h>
29#include <sys/syscall.h>
30#endif
3993f6b1 31#include <sys/ptrace.h>
0274a8ce 32#include "linux-nat.h"
ac264b3b 33#include "linux-fork.h"
d6b0e80f
AC
34#include "gdbthread.h"
35#include "gdbcmd.h"
36#include "regcache.h"
4f844a66 37#include "regset.h"
10d6c8cd
DJ
38#include "inf-ptrace.h"
39#include "auxv.h"
dba24537
AC
40#include <sys/param.h> /* for MAXPATHLEN */
41#include <sys/procfs.h> /* for elf_gregset etc. */
42#include "elf-bfd.h" /* for elfcore_write_* */
43#include "gregset.h" /* for gregset */
44#include "gdbcore.h" /* for get_exec_file */
45#include <ctype.h> /* for isdigit */
46#include "gdbthread.h" /* for struct thread_info etc. */
47#include "gdb_stat.h" /* for struct stat */
48#include <fcntl.h> /* for O_RDONLY */
b84876c2
PA
49#include "inf-loop.h"
50#include "event-loop.h"
51#include "event-top.h"
dba24537
AC
52
53#ifndef O_LARGEFILE
54#define O_LARGEFILE 0
55#endif
0274a8ce 56
3993f6b1
DJ
57/* If the system headers did not provide the constants, hard-code the normal
58 values. */
59#ifndef PTRACE_EVENT_FORK
60
61#define PTRACE_SETOPTIONS 0x4200
62#define PTRACE_GETEVENTMSG 0x4201
63
64/* options set using PTRACE_SETOPTIONS */
65#define PTRACE_O_TRACESYSGOOD 0x00000001
66#define PTRACE_O_TRACEFORK 0x00000002
67#define PTRACE_O_TRACEVFORK 0x00000004
68#define PTRACE_O_TRACECLONE 0x00000008
69#define PTRACE_O_TRACEEXEC 0x00000010
9016a515
DJ
70#define PTRACE_O_TRACEVFORKDONE 0x00000020
71#define PTRACE_O_TRACEEXIT 0x00000040
3993f6b1
DJ
72
73/* Wait extended result codes for the above trace options. */
74#define PTRACE_EVENT_FORK 1
75#define PTRACE_EVENT_VFORK 2
76#define PTRACE_EVENT_CLONE 3
77#define PTRACE_EVENT_EXEC 4
c874c7fc 78#define PTRACE_EVENT_VFORK_DONE 5
9016a515 79#define PTRACE_EVENT_EXIT 6
3993f6b1
DJ
80
81#endif /* PTRACE_EVENT_FORK */
82
83/* We can't always assume that this flag is available, but all systems
84 with the ptrace event handlers also have __WALL, so it's safe to use
85 here. */
86#ifndef __WALL
87#define __WALL 0x40000000 /* Wait for any child. */
88#endif
89
02d3ff8c
UW
90#ifndef PTRACE_GETSIGINFO
91#define PTRACE_GETSIGINFO 0x4202
92#endif
93
10d6c8cd
DJ
94/* The single-threaded native GNU/Linux target_ops. We save a pointer for
95 the use of the multi-threaded target. */
96static struct target_ops *linux_ops;
f973ed9c 97static struct target_ops linux_ops_saved;
10d6c8cd 98
9f0bdab8
DJ
99/* The method to call, if any, when a new thread is attached. */
100static void (*linux_nat_new_thread) (ptid_t);
101
ac264b3b
MS
102/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
103 Called by our to_xfer_partial. */
104static LONGEST (*super_xfer_partial) (struct target_ops *,
105 enum target_object,
106 const char *, gdb_byte *,
107 const gdb_byte *,
10d6c8cd
DJ
108 ULONGEST, LONGEST);
109
d6b0e80f 110static int debug_linux_nat;
920d2a44
AC
111static void
112show_debug_linux_nat (struct ui_file *file, int from_tty,
113 struct cmd_list_element *c, const char *value)
114{
115 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
116 value);
117}
d6b0e80f 118
b84876c2
PA
119static int debug_linux_nat_async = 0;
120static void
121show_debug_linux_nat_async (struct ui_file *file, int from_tty,
122 struct cmd_list_element *c, const char *value)
123{
124 fprintf_filtered (file, _("Debugging of GNU/Linux async lwp module is %s.\n"),
125 value);
126}
127
9016a515
DJ
128static int linux_parent_pid;
129
ae087d01
DJ
130struct simple_pid_list
131{
132 int pid;
3d799a95 133 int status;
ae087d01
DJ
134 struct simple_pid_list *next;
135};
136struct simple_pid_list *stopped_pids;
137
3993f6b1
DJ
138/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
139 can not be used, 1 if it can. */
140
141static int linux_supports_tracefork_flag = -1;
142
9016a515
DJ
143/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
144 PTRACE_O_TRACEVFORKDONE. */
145
146static int linux_supports_tracevforkdone_flag = -1;
147
b84876c2
PA
148/* Async mode support */
149
150/* To listen to target events asynchronously, we install a SIGCHLD
151 handler whose duty is to call waitpid (-1, ..., WNOHANG) to get all
152 the pending events into a pipe. Whenever we're ready to handle
153 events asynchronously, this pipe is registered as the waitable file
154 handle in the event loop. When we get to entry target points
155 coming out of the common code (target_wait, target_resume, ...),
156 that are going to call waitpid, we block SIGCHLD signals, and
157 remove all the events placed in the pipe into a local queue. All
158 the subsequent calls to my_waitpid (a waitpid wrapper) check this
159 local queue first. */
160
161/* True if async mode is currently on. */
162static int linux_nat_async_enabled;
163
164/* Zero if the async mode, although enabled, is masked, which means
165 linux_nat_wait should behave as if async mode was off. */
166static int linux_nat_async_mask_value = 1;
167
168/* The read/write ends of the pipe registered as waitable file in the
169 event loop. */
170static int linux_nat_event_pipe[2] = { -1, -1 };
171
172/* Number of queued events in the pipe. */
173static volatile int linux_nat_num_queued_events;
174
175/* If async mode is on, true if we're listening for events; false if
176 target events are blocked. */
177static int linux_nat_async_events_enabled;
178
179static int linux_nat_async_events (int enable);
180static void pipe_to_local_event_queue (void);
181static void local_event_queue_to_pipe (void);
182static void linux_nat_event_pipe_push (int pid, int status, int options);
183static int linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options);
184static void linux_nat_set_async_mode (int on);
185static void linux_nat_async (void (*callback)
186 (enum inferior_event_type event_type, void *context),
187 void *context);
188static int linux_nat_async_mask (int mask);
189
190/* Captures the result of a successful waitpid call, along with the
191 options used in that call. */
192struct waitpid_result
193{
194 int pid;
195 int status;
196 int options;
197 struct waitpid_result *next;
198};
199
200/* A singly-linked list of the results of the waitpid calls performed
201 in the async SIGCHLD handler. */
202static struct waitpid_result *waitpid_queue = NULL;
203
204static int
205queued_waitpid (int pid, int *status, int flags)
206{
207 struct waitpid_result *msg = waitpid_queue, *prev = NULL;
208
209 if (debug_linux_nat_async)
210 fprintf_unfiltered (gdb_stdlog,
211 "\
212QWPID: linux_nat_async_events_enabled(%d), linux_nat_num_queued_events(%d)\n",
213 linux_nat_async_events_enabled,
214 linux_nat_num_queued_events);
215
216 if (flags & __WALL)
217 {
218 for (; msg; prev = msg, msg = msg->next)
219 if (pid == -1 || pid == msg->pid)
220 break;
221 }
222 else if (flags & __WCLONE)
223 {
224 for (; msg; prev = msg, msg = msg->next)
225 if (msg->options & __WCLONE
226 && (pid == -1 || pid == msg->pid))
227 break;
228 }
229 else
230 {
231 for (; msg; prev = msg, msg = msg->next)
232 if ((msg->options & __WCLONE) == 0
233 && (pid == -1 || pid == msg->pid))
234 break;
235 }
236
237 if (msg)
238 {
239 int pid;
240
241 if (prev)
242 prev->next = msg->next;
243 else
244 waitpid_queue = msg->next;
245
246 msg->next = NULL;
247 if (status)
248 *status = msg->status;
249 pid = msg->pid;
250
251 if (debug_linux_nat_async)
252 fprintf_unfiltered (gdb_stdlog, "QWPID: pid(%d), status(%x)\n",
253 pid, msg->status);
254 xfree (msg);
255
256 return pid;
257 }
258
259 if (debug_linux_nat_async)
260 fprintf_unfiltered (gdb_stdlog, "QWPID: miss\n");
261
262 if (status)
263 *status = 0;
264 return -1;
265}
266
267static void
268push_waitpid (int pid, int status, int options)
269{
270 struct waitpid_result *event, *new_event;
271
272 new_event = xmalloc (sizeof (*new_event));
273 new_event->pid = pid;
274 new_event->status = status;
275 new_event->options = options;
276 new_event->next = NULL;
277
278 if (waitpid_queue)
279 {
280 for (event = waitpid_queue;
281 event && event->next;
282 event = event->next)
283 ;
284
285 event->next = new_event;
286 }
287 else
288 waitpid_queue = new_event;
289}
290
710151dd 291/* Drain all queued events of PID. If PID is -1, the effect is of
b84876c2
PA
292 draining all events. */
293static void
294drain_queued_events (int pid)
295{
296 while (queued_waitpid (pid, NULL, __WALL) != -1)
297 ;
298}
299
ae087d01
DJ
300\f
301/* Trivial list manipulation functions to keep track of a list of
302 new stopped processes. */
303static void
3d799a95 304add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
ae087d01
DJ
305{
306 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
307 new_pid->pid = pid;
3d799a95 308 new_pid->status = status;
ae087d01
DJ
309 new_pid->next = *listp;
310 *listp = new_pid;
311}
312
313static int
3d799a95 314pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status)
ae087d01
DJ
315{
316 struct simple_pid_list **p;
317
318 for (p = listp; *p != NULL; p = &(*p)->next)
319 if ((*p)->pid == pid)
320 {
321 struct simple_pid_list *next = (*p)->next;
3d799a95 322 *status = (*p)->status;
ae087d01
DJ
323 xfree (*p);
324 *p = next;
325 return 1;
326 }
327 return 0;
328}
329
3d799a95
DJ
330static void
331linux_record_stopped_pid (int pid, int status)
ae087d01 332{
3d799a95 333 add_to_pid_list (&stopped_pids, pid, status);
ae087d01
DJ
334}
335
3993f6b1
DJ
336\f
337/* A helper function for linux_test_for_tracefork, called after fork (). */
338
339static void
340linux_tracefork_child (void)
341{
342 int ret;
343
344 ptrace (PTRACE_TRACEME, 0, 0, 0);
345 kill (getpid (), SIGSTOP);
346 fork ();
48bb3cce 347 _exit (0);
3993f6b1
DJ
348}
349
b84876c2
PA
350/* Wrapper function for waitpid which handles EINTR, and checks for
351 locally queued events. */
b957e937
DJ
352
353static int
354my_waitpid (int pid, int *status, int flags)
355{
356 int ret;
b84876c2
PA
357
358 /* There should be no concurrent calls to waitpid. */
359 gdb_assert (!linux_nat_async_events_enabled);
360
361 ret = queued_waitpid (pid, status, flags);
362 if (ret != -1)
363 return ret;
364
b957e937
DJ
365 do
366 {
367 ret = waitpid (pid, status, flags);
368 }
369 while (ret == -1 && errno == EINTR);
370
371 return ret;
372}
373
374/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
375
376 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
377 we know that the feature is not available. This may change the tracing
378 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
379
380 However, if it succeeds, we don't know for sure that the feature is
381 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
3993f6b1 382 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
b957e937
DJ
383 fork tracing, and let it fork. If the process exits, we assume that we
384 can't use TRACEFORK; if we get the fork notification, and we can extract
385 the new child's PID, then we assume that we can. */
3993f6b1
DJ
386
387static void
b957e937 388linux_test_for_tracefork (int original_pid)
3993f6b1
DJ
389{
390 int child_pid, ret, status;
391 long second_pid;
392
b957e937
DJ
393 linux_supports_tracefork_flag = 0;
394 linux_supports_tracevforkdone_flag = 0;
395
396 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
397 if (ret != 0)
398 return;
399
3993f6b1
DJ
400 child_pid = fork ();
401 if (child_pid == -1)
e2e0b3e5 402 perror_with_name (("fork"));
3993f6b1
DJ
403
404 if (child_pid == 0)
405 linux_tracefork_child ();
406
b957e937 407 ret = my_waitpid (child_pid, &status, 0);
3993f6b1 408 if (ret == -1)
e2e0b3e5 409 perror_with_name (("waitpid"));
3993f6b1 410 else if (ret != child_pid)
8a3fe4f8 411 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
3993f6b1 412 if (! WIFSTOPPED (status))
8a3fe4f8 413 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);
3993f6b1 414
3993f6b1
DJ
415 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
416 if (ret != 0)
417 {
b957e937
DJ
418 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
419 if (ret != 0)
420 {
8a3fe4f8 421 warning (_("linux_test_for_tracefork: failed to kill child"));
b957e937
DJ
422 return;
423 }
424
425 ret = my_waitpid (child_pid, &status, 0);
426 if (ret != child_pid)
8a3fe4f8 427 warning (_("linux_test_for_tracefork: failed to wait for killed child"));
b957e937 428 else if (!WIFSIGNALED (status))
8a3fe4f8
AC
429 warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
430 "killed child"), status);
b957e937 431
3993f6b1
DJ
432 return;
433 }
434
9016a515
DJ
435 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
436 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
437 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
438 linux_supports_tracevforkdone_flag = (ret == 0);
439
b957e937
DJ
440 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
441 if (ret != 0)
8a3fe4f8 442 warning (_("linux_test_for_tracefork: failed to resume child"));
b957e937
DJ
443
444 ret = my_waitpid (child_pid, &status, 0);
445
3993f6b1
DJ
446 if (ret == child_pid && WIFSTOPPED (status)
447 && status >> 16 == PTRACE_EVENT_FORK)
448 {
449 second_pid = 0;
450 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
451 if (ret == 0 && second_pid != 0)
452 {
453 int second_status;
454
455 linux_supports_tracefork_flag = 1;
b957e937
DJ
456 my_waitpid (second_pid, &second_status, 0);
457 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
458 if (ret != 0)
8a3fe4f8 459 warning (_("linux_test_for_tracefork: failed to kill second child"));
97725dc4 460 my_waitpid (second_pid, &status, 0);
3993f6b1
DJ
461 }
462 }
b957e937 463 else
8a3fe4f8
AC
464 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
465 "(%d, status 0x%x)"), ret, status);
3993f6b1 466
b957e937
DJ
467 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
468 if (ret != 0)
8a3fe4f8 469 warning (_("linux_test_for_tracefork: failed to kill child"));
b957e937 470 my_waitpid (child_pid, &status, 0);
3993f6b1
DJ
471}
472
473/* Return non-zero iff we have tracefork functionality available.
474 This function also sets linux_supports_tracefork_flag. */
475
476static int
b957e937 477linux_supports_tracefork (int pid)
3993f6b1
DJ
478{
479 if (linux_supports_tracefork_flag == -1)
b957e937 480 linux_test_for_tracefork (pid);
3993f6b1
DJ
481 return linux_supports_tracefork_flag;
482}
483
9016a515 484static int
b957e937 485linux_supports_tracevforkdone (int pid)
9016a515
DJ
486{
487 if (linux_supports_tracefork_flag == -1)
b957e937 488 linux_test_for_tracefork (pid);
9016a515
DJ
489 return linux_supports_tracevforkdone_flag;
490}
491
3993f6b1 492\f
4de4c07c
DJ
493void
494linux_enable_event_reporting (ptid_t ptid)
495{
d3587048 496 int pid = ptid_get_lwp (ptid);
4de4c07c
DJ
497 int options;
498
d3587048
DJ
499 if (pid == 0)
500 pid = ptid_get_pid (ptid);
501
b957e937 502 if (! linux_supports_tracefork (pid))
4de4c07c
DJ
503 return;
504
a2f23071
DJ
505 options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
506 | PTRACE_O_TRACECLONE;
b957e937 507 if (linux_supports_tracevforkdone (pid))
9016a515
DJ
508 options |= PTRACE_O_TRACEVFORKDONE;
509
510 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
511 read-only process state. */
4de4c07c
DJ
512
513 ptrace (PTRACE_SETOPTIONS, pid, 0, options);
514}
515
6d8fd2b7
UW
516static void
517linux_child_post_attach (int pid)
4de4c07c
DJ
518{
519 linux_enable_event_reporting (pid_to_ptid (pid));
0ec9a092 520 check_for_thread_db ();
4de4c07c
DJ
521}
522
10d6c8cd 523static void
4de4c07c
DJ
524linux_child_post_startup_inferior (ptid_t ptid)
525{
526 linux_enable_event_reporting (ptid);
0ec9a092 527 check_for_thread_db ();
4de4c07c
DJ
528}
529
6d8fd2b7
UW
530static int
531linux_child_follow_fork (struct target_ops *ops, int follow_child)
3993f6b1 532{
4de4c07c
DJ
533 ptid_t last_ptid;
534 struct target_waitstatus last_status;
9016a515 535 int has_vforked;
4de4c07c
DJ
536 int parent_pid, child_pid;
537
b84876c2
PA
538 if (target_can_async_p ())
539 target_async (NULL, 0);
540
4de4c07c 541 get_last_target_status (&last_ptid, &last_status);
9016a515 542 has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
d3587048
DJ
543 parent_pid = ptid_get_lwp (last_ptid);
544 if (parent_pid == 0)
545 parent_pid = ptid_get_pid (last_ptid);
4de4c07c
DJ
546 child_pid = last_status.value.related_pid;
547
548 if (! follow_child)
549 {
550 /* We're already attached to the parent, by default. */
551
552 /* Before detaching from the child, remove all breakpoints from
553 it. (This won't actually modify the breakpoint list, but will
554 physically remove the breakpoints from the child.) */
9016a515
DJ
555 /* If we vforked this will remove the breakpoints from the parent
556 also, but they'll be reinserted below. */
4de4c07c
DJ
557 detach_breakpoints (child_pid);
558
ac264b3b
MS
559 /* Detach new forked process? */
560 if (detach_fork)
f75c00e4 561 {
e85a822c 562 if (info_verbose || debug_linux_nat)
ac264b3b
MS
563 {
564 target_terminal_ours ();
565 fprintf_filtered (gdb_stdlog,
566 "Detaching after fork from child process %d.\n",
567 child_pid);
568 }
4de4c07c 569
ac264b3b
MS
570 ptrace (PTRACE_DETACH, child_pid, 0, 0);
571 }
572 else
573 {
574 struct fork_info *fp;
575 /* Retain child fork in ptrace (stopped) state. */
576 fp = find_fork_pid (child_pid);
577 if (!fp)
578 fp = add_fork (child_pid);
579 fork_save_infrun_state (fp, 0);
580 }
9016a515
DJ
581
582 if (has_vforked)
583 {
b957e937
DJ
584 gdb_assert (linux_supports_tracefork_flag >= 0);
585 if (linux_supports_tracevforkdone (0))
9016a515
DJ
586 {
587 int status;
588
589 ptrace (PTRACE_CONT, parent_pid, 0, 0);
58aecb61 590 my_waitpid (parent_pid, &status, __WALL);
c874c7fc 591 if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
8a3fe4f8
AC
592 warning (_("Unexpected waitpid result %06x when waiting for "
593 "vfork-done"), status);
9016a515
DJ
594 }
595 else
596 {
597 /* We can't insert breakpoints until the child has
598 finished with the shared memory region. We need to
599 wait until that happens. Ideal would be to just
600 call:
601 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
602 - waitpid (parent_pid, &status, __WALL);
603 However, most architectures can't handle a syscall
604 being traced on the way out if it wasn't traced on
605 the way in.
606
607 We might also think to loop, continuing the child
608 until it exits or gets a SIGTRAP. One problem is
609 that the child might call ptrace with PTRACE_TRACEME.
610
611 There's no simple and reliable way to figure out when
612 the vforked child will be done with its copy of the
613 shared memory. We could step it out of the syscall,
614 two instructions, let it go, and then single-step the
615 parent once. When we have hardware single-step, this
616 would work; with software single-step it could still
617 be made to work but we'd have to be able to insert
618 single-step breakpoints in the child, and we'd have
619 to insert -just- the single-step breakpoint in the
620 parent. Very awkward.
621
622 In the end, the best we can do is to make sure it
623 runs for a little while. Hopefully it will be out of
624 range of any breakpoints we reinsert. Usually this
625 is only the single-step breakpoint at vfork's return
626 point. */
627
628 usleep (10000);
629 }
630
631 /* Since we vforked, breakpoints were removed in the parent
632 too. Put them back. */
633 reattach_breakpoints (parent_pid);
634 }
4de4c07c 635 }
3993f6b1 636 else
4de4c07c
DJ
637 {
638 char child_pid_spelling[40];
639
640 /* Needed to keep the breakpoint lists in sync. */
9016a515
DJ
641 if (! has_vforked)
642 detach_breakpoints (child_pid);
4de4c07c
DJ
643
644 /* Before detaching from the parent, remove all breakpoints from it. */
645 remove_breakpoints ();
646
e85a822c 647 if (info_verbose || debug_linux_nat)
f75c00e4
DJ
648 {
649 target_terminal_ours ();
ac264b3b
MS
650 fprintf_filtered (gdb_stdlog,
651 "Attaching after fork to child process %d.\n",
652 child_pid);
f75c00e4 653 }
4de4c07c 654
9016a515
DJ
655 /* If we're vforking, we may want to hold on to the parent until
656 the child exits or execs. At exec time we can remove the old
657 breakpoints from the parent and detach it; at exit time we
658 could do the same (or even, sneakily, resume debugging it - the
659 child's exec has failed, or something similar).
660
661 This doesn't clean up "properly", because we can't call
662 target_detach, but that's OK; if the current target is "child",
663 then it doesn't need any further cleanups, and lin_lwp will
664 generally not encounter vfork (vfork is defined to fork
665 in libpthread.so).
666
667 The holding part is very easy if we have VFORKDONE events;
668 but keeping track of both processes is beyond GDB at the
669 moment. So we don't expose the parent to the rest of GDB.
670 Instead we quietly hold onto it until such time as we can
671 safely resume it. */
672
673 if (has_vforked)
674 linux_parent_pid = parent_pid;
ac264b3b
MS
675 else if (!detach_fork)
676 {
677 struct fork_info *fp;
678 /* Retain parent fork in ptrace (stopped) state. */
679 fp = find_fork_pid (parent_pid);
680 if (!fp)
681 fp = add_fork (parent_pid);
682 fork_save_infrun_state (fp, 0);
683 }
9016a515 684 else
b84876c2 685 target_detach (NULL, 0);
4de4c07c 686
9f0bdab8 687 inferior_ptid = ptid_build (child_pid, child_pid, 0);
ee057212
DJ
688
689 /* Reinstall ourselves, since we might have been removed in
690 target_detach (which does other necessary cleanup). */
ac264b3b 691
ee057212 692 push_target (ops);
9f0bdab8 693 linux_nat_switch_fork (inferior_ptid);
ef29ce1a 694 check_for_thread_db ();
4de4c07c
DJ
695
696 /* Reset breakpoints in the child as appropriate. */
697 follow_inferior_reset_breakpoints ();
698 }
699
b84876c2
PA
700 if (target_can_async_p ())
701 target_async (inferior_event_handler, 0);
702
4de4c07c
DJ
703 return 0;
704}
705
4de4c07c 706\f
6d8fd2b7
UW
707static void
708linux_child_insert_fork_catchpoint (int pid)
4de4c07c 709{
b957e937 710 if (! linux_supports_tracefork (pid))
8a3fe4f8 711 error (_("Your system does not support fork catchpoints."));
3993f6b1
DJ
712}
713
6d8fd2b7
UW
714static void
715linux_child_insert_vfork_catchpoint (int pid)
3993f6b1 716{
b957e937 717 if (!linux_supports_tracefork (pid))
8a3fe4f8 718 error (_("Your system does not support vfork catchpoints."));
3993f6b1
DJ
719}
720
6d8fd2b7
UW
721static void
722linux_child_insert_exec_catchpoint (int pid)
3993f6b1 723{
b957e937 724 if (!linux_supports_tracefork (pid))
8a3fe4f8 725 error (_("Your system does not support exec catchpoints."));
3993f6b1
DJ
726}
727
d6b0e80f
AC
728/* On GNU/Linux there are no real LWP's. The closest thing to LWP's
729 are processes sharing the same VM space. A multi-threaded process
730 is basically a group of such processes. However, such a grouping
731 is almost entirely a user-space issue; the kernel doesn't enforce
732 such a grouping at all (this might change in the future). In
733 general, we'll rely on the threads library (i.e. the GNU/Linux
734 Threads library) to provide such a grouping.
735
736 It is perfectly well possible to write a multi-threaded application
737 without the assistance of a threads library, by using the clone
738 system call directly. This module should be able to give some
739 rudimentary support for debugging such applications if developers
740 specify the CLONE_PTRACE flag in the clone system call, and are
741 using the Linux kernel 2.4 or above.
742
743 Note that there are some peculiarities in GNU/Linux that affect
744 this code:
745
746 - In general one should specify the __WCLONE flag to waitpid in
747 order to make it report events for any of the cloned processes
748 (and leave it out for the initial process). However, if a cloned
749 process has exited the exit status is only reported if the
750 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
751 we cannot use it since GDB must work on older systems too.
752
753 - When a traced, cloned process exits and is waited for by the
754 debugger, the kernel reassigns it to the original parent and
755 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
756 library doesn't notice this, which leads to the "zombie problem":
757 When debugged a multi-threaded process that spawns a lot of
758 threads will run out of processes, even if the threads exit,
759 because the "zombies" stay around. */
760
761/* List of known LWPs. */
9f0bdab8 762struct lwp_info *lwp_list;
d6b0e80f
AC
763
764/* Number of LWPs in the list. */
765static int num_lwps;
d6b0e80f
AC
766\f
767
d6b0e80f
AC
768/* If the last reported event was a SIGTRAP, this variable is set to
769 the process id of the LWP/thread that got it. */
770ptid_t trap_ptid;
771\f
772
d6b0e80f
AC
773/* Since we cannot wait (in linux_nat_wait) for the initial process and
774 any cloned processes with a single call to waitpid, we have to use
775 the WNOHANG flag and call waitpid in a loop. To optimize
776 things a bit we use `sigsuspend' to wake us up when a process has
777 something to report (it will send us a SIGCHLD if it has). To make
778 this work we have to juggle with the signal mask. We save the
779 original signal mask such that we can restore it before creating a
780 new process in order to avoid blocking certain signals in the
781 inferior. We then block SIGCHLD during the waitpid/sigsuspend
782 loop. */
783
784/* Original signal mask. */
785static sigset_t normal_mask;
786
787/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
788 _initialize_linux_nat. */
789static sigset_t suspend_mask;
790
b84876c2
PA
791/* SIGCHLD action for synchronous mode. */
792struct sigaction sync_sigchld_action;
793
794/* SIGCHLD action for asynchronous mode. */
795static struct sigaction async_sigchld_action;
d6b0e80f
AC
796\f
797
798/* Prototypes for local functions. */
799static int stop_wait_callback (struct lwp_info *lp, void *data);
800static int linux_nat_thread_alive (ptid_t ptid);
6d8fd2b7 801static char *linux_child_pid_to_exec_file (int pid);
710151dd
PA
802static int cancel_breakpoint (struct lwp_info *lp);
803
d6b0e80f
AC
804\f
805/* Convert wait status STATUS to a string. Used for printing debug
806 messages only. */
807
808static char *
809status_to_str (int status)
810{
811 static char buf[64];
812
813 if (WIFSTOPPED (status))
814 snprintf (buf, sizeof (buf), "%s (stopped)",
815 strsignal (WSTOPSIG (status)));
816 else if (WIFSIGNALED (status))
817 snprintf (buf, sizeof (buf), "%s (terminated)",
818 strsignal (WSTOPSIG (status)));
819 else
820 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
821
822 return buf;
823}
824
825/* Initialize the list of LWPs. Note that this module, contrary to
826 what GDB's generic threads layer does for its thread list,
827 re-initializes the LWP lists whenever we mourn or detach (which
828 doesn't involve mourning) the inferior. */
829
830static void
831init_lwp_list (void)
832{
833 struct lwp_info *lp, *lpnext;
834
835 for (lp = lwp_list; lp; lp = lpnext)
836 {
837 lpnext = lp->next;
838 xfree (lp);
839 }
840
841 lwp_list = NULL;
842 num_lwps = 0;
d6b0e80f
AC
843}
844
f973ed9c 845/* Add the LWP specified by PID to the list. Return a pointer to the
9f0bdab8
DJ
846 structure describing the new LWP. The LWP should already be stopped
847 (with an exception for the very first LWP). */
d6b0e80f
AC
848
849static struct lwp_info *
850add_lwp (ptid_t ptid)
851{
852 struct lwp_info *lp;
853
854 gdb_assert (is_lwp (ptid));
855
856 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
857
858 memset (lp, 0, sizeof (struct lwp_info));
859
860 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
861
862 lp->ptid = ptid;
863
864 lp->next = lwp_list;
865 lwp_list = lp;
f973ed9c 866 ++num_lwps;
d6b0e80f 867
9f0bdab8
DJ
868 if (num_lwps > 1 && linux_nat_new_thread != NULL)
869 linux_nat_new_thread (ptid);
870
d6b0e80f
AC
871 return lp;
872}
873
874/* Remove the LWP specified by PID from the list. */
875
876static void
877delete_lwp (ptid_t ptid)
878{
879 struct lwp_info *lp, *lpprev;
880
881 lpprev = NULL;
882
883 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
884 if (ptid_equal (lp->ptid, ptid))
885 break;
886
887 if (!lp)
888 return;
889
d6b0e80f
AC
890 num_lwps--;
891
892 if (lpprev)
893 lpprev->next = lp->next;
894 else
895 lwp_list = lp->next;
896
897 xfree (lp);
898}
899
900/* Return a pointer to the structure describing the LWP corresponding
901 to PID. If no corresponding LWP could be found, return NULL. */
902
903static struct lwp_info *
904find_lwp_pid (ptid_t ptid)
905{
906 struct lwp_info *lp;
907 int lwp;
908
909 if (is_lwp (ptid))
910 lwp = GET_LWP (ptid);
911 else
912 lwp = GET_PID (ptid);
913
914 for (lp = lwp_list; lp; lp = lp->next)
915 if (lwp == GET_LWP (lp->ptid))
916 return lp;
917
918 return NULL;
919}
920
921/* Call CALLBACK with its second argument set to DATA for every LWP in
922 the list. If CALLBACK returns 1 for a particular LWP, return a
923 pointer to the structure describing that LWP immediately.
924 Otherwise return NULL. */
925
926struct lwp_info *
927iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
928{
929 struct lwp_info *lp, *lpnext;
930
931 for (lp = lwp_list; lp; lp = lpnext)
932 {
933 lpnext = lp->next;
934 if ((*callback) (lp, data))
935 return lp;
936 }
937
938 return NULL;
939}
940
f973ed9c
DJ
941/* Update our internal state when changing from one fork (checkpoint,
942 et cetera) to another indicated by NEW_PTID. We can only switch
943 single-threaded applications, so we only create one new LWP, and
944 the previous list is discarded. */
945
946void
947linux_nat_switch_fork (ptid_t new_ptid)
948{
949 struct lwp_info *lp;
950
951 init_lwp_list ();
952 lp = add_lwp (new_ptid);
953 lp->stopped = 1;
954}
955
e26af52f
DJ
956/* Record a PTID for later deletion. */
957
958struct saved_ptids
959{
960 ptid_t ptid;
961 struct saved_ptids *next;
962};
963static struct saved_ptids *threads_to_delete;
964
965static void
966record_dead_thread (ptid_t ptid)
967{
968 struct saved_ptids *p = xmalloc (sizeof (struct saved_ptids));
969 p->ptid = ptid;
970 p->next = threads_to_delete;
971 threads_to_delete = p;
972}
973
974/* Delete any dead threads which are not the current thread. */
975
976static void
977prune_lwps (void)
978{
979 struct saved_ptids **p = &threads_to_delete;
980
981 while (*p)
982 if (! ptid_equal ((*p)->ptid, inferior_ptid))
983 {
984 struct saved_ptids *tmp = *p;
985 delete_thread (tmp->ptid);
986 *p = tmp->next;
987 xfree (tmp);
988 }
989 else
990 p = &(*p)->next;
991}
992
e26af52f
DJ
993/* Handle the exit of a single thread LP. */
994
995static void
996exit_lwp (struct lwp_info *lp)
997{
998 if (in_thread_list (lp->ptid))
999 {
17faa917
DJ
1000 if (print_thread_events)
1001 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1002
e26af52f
DJ
1003 /* Core GDB cannot deal with us deleting the current thread. */
1004 if (!ptid_equal (lp->ptid, inferior_ptid))
1005 delete_thread (lp->ptid);
1006 else
1007 record_dead_thread (lp->ptid);
e26af52f
DJ
1008 }
1009
1010 delete_lwp (lp->ptid);
1011}
1012
d6b0e80f
AC
1013/* Attach to the LWP specified by PID. If VERBOSE is non-zero, print
1014 a message telling the user that a new LWP has been added to the
9ee57c33
DJ
1015 process. Return 0 if successful or -1 if the new LWP could not
1016 be attached. */
d6b0e80f 1017
9ee57c33 1018int
93815fbf 1019lin_lwp_attach_lwp (ptid_t ptid)
d6b0e80f 1020{
9ee57c33 1021 struct lwp_info *lp;
b84876c2 1022 int async_events_were_enabled = 0;
d6b0e80f
AC
1023
1024 gdb_assert (is_lwp (ptid));
1025
b84876c2
PA
1026 if (target_can_async_p ())
1027 async_events_were_enabled = linux_nat_async_events (0);
d6b0e80f 1028
9ee57c33 1029 lp = find_lwp_pid (ptid);
d6b0e80f
AC
1030
1031 /* We assume that we're already attached to any LWP that has an id
1032 equal to the overall process id, and to any LWP that is already
1033 in our list of LWPs. If we're not seeing exit events from threads
1034 and we've had PID wraparound since we last tried to stop all threads,
1035 this assumption might be wrong; fortunately, this is very unlikely
1036 to happen. */
9ee57c33 1037 if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
d6b0e80f
AC
1038 {
1039 pid_t pid;
1040 int status;
9f0bdab8 1041 int cloned = 0;
d6b0e80f
AC
1042
1043 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
9ee57c33
DJ
1044 {
1045 /* If we fail to attach to the thread, issue a warning,
1046 but continue. One way this can happen is if thread
e9efe249 1047 creation is interrupted; as of Linux kernel 2.6.19, a
9ee57c33
DJ
1048 bug may place threads in the thread list and then fail
1049 to create them. */
1050 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1051 safe_strerror (errno));
1052 return -1;
1053 }
1054
d6b0e80f
AC
1055 if (debug_linux_nat)
1056 fprintf_unfiltered (gdb_stdlog,
1057 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1058 target_pid_to_str (ptid));
1059
58aecb61 1060 pid = my_waitpid (GET_LWP (ptid), &status, 0);
d6b0e80f
AC
1061 if (pid == -1 && errno == ECHILD)
1062 {
1063 /* Try again with __WCLONE to check cloned processes. */
58aecb61 1064 pid = my_waitpid (GET_LWP (ptid), &status, __WCLONE);
9f0bdab8 1065 cloned = 1;
d6b0e80f
AC
1066 }
1067
1068 gdb_assert (pid == GET_LWP (ptid)
1069 && WIFSTOPPED (status) && WSTOPSIG (status));
1070
9f0bdab8
DJ
1071 if (lp == NULL)
1072 lp = add_lwp (ptid);
1073 lp->cloned = cloned;
1074
0ec9a092 1075 target_post_attach (pid);
d6b0e80f
AC
1076
1077 lp->stopped = 1;
1078
1079 if (debug_linux_nat)
1080 {
1081 fprintf_unfiltered (gdb_stdlog,
1082 "LLAL: waitpid %s received %s\n",
1083 target_pid_to_str (ptid),
1084 status_to_str (status));
1085 }
1086 }
1087 else
1088 {
1089 /* We assume that the LWP representing the original process is
1090 already stopped. Mark it as stopped in the data structure
155bd5d1
AC
1091 that the GNU/linux ptrace layer uses to keep track of
1092 threads. Note that this won't have already been done since
1093 the main thread will have, we assume, been stopped by an
1094 attach from a different layer. */
9ee57c33
DJ
1095 if (lp == NULL)
1096 lp = add_lwp (ptid);
d6b0e80f
AC
1097 lp->stopped = 1;
1098 }
9ee57c33 1099
b84876c2
PA
1100 if (async_events_were_enabled)
1101 linux_nat_async_events (1);
1102
9ee57c33 1103 return 0;
d6b0e80f
AC
1104}
1105
b84876c2
PA
1106static void
1107linux_nat_create_inferior (char *exec_file, char *allargs, char **env,
1108 int from_tty)
1109{
1110 int saved_async = 0;
1111
1112 /* The fork_child mechanism is synchronous and calls target_wait, so
1113 we have to mask the async mode. */
1114
1115 if (target_can_async_p ())
1116 saved_async = linux_nat_async_mask (0);
1117 else
1118 {
1119 /* Restore the original signal mask. */
1120 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1121 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1122 suspend_mask = normal_mask;
1123 sigdelset (&suspend_mask, SIGCHLD);
1124 }
1125
1126 linux_ops->to_create_inferior (exec_file, allargs, env, from_tty);
1127
1128 if (saved_async)
1129 linux_nat_async_mask (saved_async);
1130}
1131
d6b0e80f
AC
1132static void
1133linux_nat_attach (char *args, int from_tty)
1134{
1135 struct lwp_info *lp;
1136 pid_t pid;
1137 int status;
9f0bdab8 1138 int cloned = 0;
710151dd 1139 int options = 0;
d6b0e80f
AC
1140
1141 /* FIXME: We should probably accept a list of process id's, and
1142 attach all of them. */
10d6c8cd 1143 linux_ops->to_attach (args, from_tty);
d6b0e80f 1144
b84876c2
PA
1145 if (!target_can_async_p ())
1146 {
1147 /* Restore the original signal mask. */
1148 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1149 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1150 suspend_mask = normal_mask;
1151 sigdelset (&suspend_mask, SIGCHLD);
1152 }
1153
d6b0e80f
AC
1154 /* Make sure the initial process is stopped. The user-level threads
1155 layer might want to poke around in the inferior, and that won't
1156 work if things haven't stabilized yet. */
710151dd 1157 pid = my_waitpid (GET_PID (inferior_ptid), &status, options);
d6b0e80f
AC
1158 if (pid == -1 && errno == ECHILD)
1159 {
8a3fe4f8 1160 warning (_("%s is a cloned process"), target_pid_to_str (inferior_ptid));
d6b0e80f
AC
1161
1162 /* Try again with __WCLONE to check cloned processes. */
710151dd
PA
1163 options = __WCLONE;
1164 pid = my_waitpid (GET_PID (inferior_ptid), &status, options);
9f0bdab8 1165 cloned = 1;
d6b0e80f
AC
1166 }
1167
1168 gdb_assert (pid == GET_PID (inferior_ptid)
1169 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP);
1170
9f0bdab8
DJ
1171 /* Add the initial process as the first LWP to the list. */
1172 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1173 lp = add_lwp (inferior_ptid);
1174 lp->cloned = cloned;
1175
403fe197
PA
1176 /* If this process is not using thread_db, then we still don't
1177 detect any other threads, but add at least this one. */
1178 add_thread_silent (lp->ptid);
1179
d6b0e80f 1180 lp->stopped = 1;
d6b0e80f 1181 lp->resumed = 1;
710151dd
PA
1182
1183 if (!target_can_async_p ())
d6b0e80f 1184 {
710151dd
PA
1185 /* Fake the SIGSTOP that core GDB expects. */
1186 lp->status = W_STOPCODE (SIGSTOP);
1187 if (debug_linux_nat)
1188 fprintf_unfiltered (gdb_stdlog,
1189 "LNA: waitpid %ld, faking SIGSTOP\n", (long) pid);
1190 }
1191 else
1192 {
1193 /* We already waited for this LWP, so put the wait result on the
1194 pipe. The event loop will wake up and gets us to handling
1195 this event. */
1196 linux_nat_event_pipe_push (pid, status, options);
b84876c2
PA
1197 /* Register in the event loop. */
1198 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1199 }
1200}
1201
1202static int
1203detach_callback (struct lwp_info *lp, void *data)
1204{
1205 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1206
1207 if (debug_linux_nat && lp->status)
1208 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1209 strsignal (WSTOPSIG (lp->status)),
1210 target_pid_to_str (lp->ptid));
1211
1212 while (lp->signalled && lp->stopped)
1213 {
1214 errno = 0;
1215 if (ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0,
1216 WSTOPSIG (lp->status)) < 0)
8a3fe4f8 1217 error (_("Can't continue %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1218 safe_strerror (errno));
1219
1220 if (debug_linux_nat)
1221 fprintf_unfiltered (gdb_stdlog,
1222 "DC: PTRACE_CONTINUE (%s, 0, %s) (OK)\n",
1223 target_pid_to_str (lp->ptid),
1224 status_to_str (lp->status));
1225
1226 lp->stopped = 0;
1227 lp->signalled = 0;
1228 lp->status = 0;
1229 /* FIXME drow/2003-08-26: There was a call to stop_wait_callback
1230 here. But since lp->signalled was cleared above,
1231 stop_wait_callback didn't do anything; the process was left
1232 running. Shouldn't we be waiting for it to stop?
1233 I've removed the call, since stop_wait_callback now does do
1234 something when called with lp->signalled == 0. */
1235
1236 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1237 }
1238
1239 /* We don't actually detach from the LWP that has an id equal to the
1240 overall process id just yet. */
1241 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1242 {
1243 errno = 0;
1244 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
1245 WSTOPSIG (lp->status)) < 0)
8a3fe4f8 1246 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1247 safe_strerror (errno));
1248
1249 if (debug_linux_nat)
1250 fprintf_unfiltered (gdb_stdlog,
1251 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1252 target_pid_to_str (lp->ptid),
1253 strsignal (WSTOPSIG (lp->status)));
1254
b84876c2 1255 drain_queued_events (GET_LWP (lp->ptid));
d6b0e80f
AC
1256 delete_lwp (lp->ptid);
1257 }
1258
1259 return 0;
1260}
1261
1262static void
1263linux_nat_detach (char *args, int from_tty)
1264{
b84876c2
PA
1265 int pid;
1266 if (target_can_async_p ())
1267 linux_nat_async (NULL, 0);
1268
d6b0e80f
AC
1269 iterate_over_lwps (detach_callback, NULL);
1270
1271 /* Only the initial process should be left right now. */
1272 gdb_assert (num_lwps == 1);
1273
1274 trap_ptid = null_ptid;
1275
1276 /* Destroy LWP info; it's no longer valid. */
1277 init_lwp_list ();
1278
b84876c2
PA
1279 pid = GET_PID (inferior_ptid);
1280 inferior_ptid = pid_to_ptid (pid);
10d6c8cd 1281 linux_ops->to_detach (args, from_tty);
b84876c2
PA
1282
1283 if (target_can_async_p ())
1284 drain_queued_events (pid);
d6b0e80f
AC
1285}
1286
1287/* Resume LP. */
1288
1289static int
1290resume_callback (struct lwp_info *lp, void *data)
1291{
1292 if (lp->stopped && lp->status == 0)
1293 {
10d6c8cd
DJ
1294 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1295 0, TARGET_SIGNAL_0);
d6b0e80f
AC
1296 if (debug_linux_nat)
1297 fprintf_unfiltered (gdb_stdlog,
1298 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1299 target_pid_to_str (lp->ptid));
1300 lp->stopped = 0;
1301 lp->step = 0;
9f0bdab8 1302 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
d6b0e80f
AC
1303 }
1304
1305 return 0;
1306}
1307
1308static int
1309resume_clear_callback (struct lwp_info *lp, void *data)
1310{
1311 lp->resumed = 0;
1312 return 0;
1313}
1314
1315static int
1316resume_set_callback (struct lwp_info *lp, void *data)
1317{
1318 lp->resumed = 1;
1319 return 0;
1320}
1321
1322static void
1323linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
1324{
1325 struct lwp_info *lp;
1326 int resume_all;
1327
76f50ad1
DJ
1328 if (debug_linux_nat)
1329 fprintf_unfiltered (gdb_stdlog,
1330 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1331 step ? "step" : "resume",
1332 target_pid_to_str (ptid),
1333 signo ? strsignal (signo) : "0",
1334 target_pid_to_str (inferior_ptid));
1335
e26af52f
DJ
1336 prune_lwps ();
1337
b84876c2
PA
1338 if (target_can_async_p ())
1339 /* Block events while we're here. */
1340 linux_nat_async_events (0);
1341
d6b0e80f
AC
1342 /* A specific PTID means `step only this process id'. */
1343 resume_all = (PIDGET (ptid) == -1);
1344
1345 if (resume_all)
1346 iterate_over_lwps (resume_set_callback, NULL);
1347 else
1348 iterate_over_lwps (resume_clear_callback, NULL);
1349
1350 /* If PID is -1, it's the current inferior that should be
1351 handled specially. */
1352 if (PIDGET (ptid) == -1)
1353 ptid = inferior_ptid;
1354
1355 lp = find_lwp_pid (ptid);
9f0bdab8 1356 gdb_assert (lp != NULL);
d6b0e80f 1357
9f0bdab8 1358 ptid = pid_to_ptid (GET_LWP (lp->ptid));
d6b0e80f 1359
9f0bdab8
DJ
1360 /* Remember if we're stepping. */
1361 lp->step = step;
d6b0e80f 1362
9f0bdab8
DJ
1363 /* Mark this LWP as resumed. */
1364 lp->resumed = 1;
76f50ad1 1365
9f0bdab8
DJ
1366 /* If we have a pending wait status for this thread, there is no
1367 point in resuming the process. But first make sure that
1368 linux_nat_wait won't preemptively handle the event - we
1369 should never take this short-circuit if we are going to
1370 leave LP running, since we have skipped resuming all the
1371 other threads. This bit of code needs to be synchronized
1372 with linux_nat_wait. */
76f50ad1 1373
710151dd
PA
1374 /* In async mode, we never have pending wait status. */
1375 if (target_can_async_p () && lp->status)
1376 internal_error (__FILE__, __LINE__, "Pending status in async mode");
1377
9f0bdab8
DJ
1378 if (lp->status && WIFSTOPPED (lp->status))
1379 {
1380 int saved_signo = target_signal_from_host (WSTOPSIG (lp->status));
76f50ad1 1381
9f0bdab8
DJ
1382 if (signal_stop_state (saved_signo) == 0
1383 && signal_print_state (saved_signo) == 0
1384 && signal_pass_state (saved_signo) == 1)
d6b0e80f 1385 {
9f0bdab8
DJ
1386 if (debug_linux_nat)
1387 fprintf_unfiltered (gdb_stdlog,
1388 "LLR: Not short circuiting for ignored "
1389 "status 0x%x\n", lp->status);
1390
d6b0e80f
AC
1391 /* FIXME: What should we do if we are supposed to continue
1392 this thread with a signal? */
1393 gdb_assert (signo == TARGET_SIGNAL_0);
9f0bdab8
DJ
1394 signo = saved_signo;
1395 lp->status = 0;
1396 }
1397 }
76f50ad1 1398
9f0bdab8
DJ
1399 if (lp->status)
1400 {
1401 /* FIXME: What should we do if we are supposed to continue
1402 this thread with a signal? */
1403 gdb_assert (signo == TARGET_SIGNAL_0);
76f50ad1 1404
9f0bdab8
DJ
1405 if (debug_linux_nat)
1406 fprintf_unfiltered (gdb_stdlog,
1407 "LLR: Short circuiting for status 0x%x\n",
1408 lp->status);
d6b0e80f 1409
9f0bdab8 1410 return;
d6b0e80f
AC
1411 }
1412
9f0bdab8
DJ
1413 /* Mark LWP as not stopped to prevent it from being continued by
1414 resume_callback. */
1415 lp->stopped = 0;
1416
d6b0e80f
AC
1417 if (resume_all)
1418 iterate_over_lwps (resume_callback, NULL);
1419
10d6c8cd 1420 linux_ops->to_resume (ptid, step, signo);
9f0bdab8
DJ
1421 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1422
d6b0e80f
AC
1423 if (debug_linux_nat)
1424 fprintf_unfiltered (gdb_stdlog,
1425 "LLR: %s %s, %s (resume event thread)\n",
1426 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1427 target_pid_to_str (ptid),
1428 signo ? strsignal (signo) : "0");
b84876c2
PA
1429
1430 if (target_can_async_p ())
1431 {
1432 target_executing = 1;
1433 target_async (inferior_event_handler, 0);
1434 }
d6b0e80f
AC
1435}
1436
1437/* Issue kill to specified lwp. */
1438
1439static int tkill_failed;
1440
1441static int
1442kill_lwp (int lwpid, int signo)
1443{
1444 errno = 0;
1445
1446/* Use tkill, if possible, in case we are using nptl threads. If tkill
1447 fails, then we are not using nptl threads and we should be using kill. */
1448
1449#ifdef HAVE_TKILL_SYSCALL
1450 if (!tkill_failed)
1451 {
1452 int ret = syscall (__NR_tkill, lwpid, signo);
1453 if (errno != ENOSYS)
1454 return ret;
1455 errno = 0;
1456 tkill_failed = 1;
1457 }
1458#endif
1459
1460 return kill (lwpid, signo);
1461}
1462
3d799a95
DJ
1463/* Handle a GNU/Linux extended wait response. If we see a clone
1464 event, we need to add the new LWP to our list (and not report the
1465 trap to higher layers). This function returns non-zero if the
1466 event should be ignored and we should wait again. If STOPPING is
1467 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1468
1469static int
3d799a95
DJ
1470linux_handle_extended_wait (struct lwp_info *lp, int status,
1471 int stopping)
d6b0e80f 1472{
3d799a95
DJ
1473 int pid = GET_LWP (lp->ptid);
1474 struct target_waitstatus *ourstatus = &lp->waitstatus;
1475 struct lwp_info *new_lp = NULL;
1476 int event = status >> 16;
d6b0e80f 1477
3d799a95
DJ
1478 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1479 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1480 {
3d799a95
DJ
1481 unsigned long new_pid;
1482 int ret;
1483
1484 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1485
3d799a95
DJ
1486 /* If we haven't already seen the new PID stop, wait for it now. */
1487 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1488 {
1489 /* The new child has a pending SIGSTOP. We can't affect it until it
1490 hits the SIGSTOP, but we're already attached. */
1491 ret = my_waitpid (new_pid, &status,
1492 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1493 if (ret == -1)
1494 perror_with_name (_("waiting for new child"));
1495 else if (ret != new_pid)
1496 internal_error (__FILE__, __LINE__,
1497 _("wait returned unexpected PID %d"), ret);
1498 else if (!WIFSTOPPED (status))
1499 internal_error (__FILE__, __LINE__,
1500 _("wait returned unexpected status 0x%x"), status);
1501 }
1502
1503 ourstatus->value.related_pid = new_pid;
1504
1505 if (event == PTRACE_EVENT_FORK)
1506 ourstatus->kind = TARGET_WAITKIND_FORKED;
1507 else if (event == PTRACE_EVENT_VFORK)
1508 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 1509 else
3d799a95
DJ
1510 {
1511 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1512 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (inferior_ptid)));
1513 new_lp->cloned = 1;
d6b0e80f 1514
3d799a95
DJ
1515 if (WSTOPSIG (status) != SIGSTOP)
1516 {
1517 /* This can happen if someone starts sending signals to
1518 the new thread before it gets a chance to run, which
1519 have a lower number than SIGSTOP (e.g. SIGUSR1).
1520 This is an unlikely case, and harder to handle for
1521 fork / vfork than for clone, so we do not try - but
1522 we handle it for clone events here. We'll send
1523 the other signal on to the thread below. */
1524
1525 new_lp->signalled = 1;
1526 }
1527 else
1528 status = 0;
d6b0e80f 1529
3d799a95
DJ
1530 if (stopping)
1531 new_lp->stopped = 1;
1532 else
1533 {
1534 new_lp->resumed = 1;
1535 ptrace (PTRACE_CONT, lp->waitstatus.value.related_pid, 0,
1536 status ? WSTOPSIG (status) : 0);
1537 }
d6b0e80f 1538
3d799a95
DJ
1539 if (debug_linux_nat)
1540 fprintf_unfiltered (gdb_stdlog,
1541 "LHEW: Got clone event from LWP %ld, resuming\n",
1542 GET_LWP (lp->ptid));
1543 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1544
1545 return 1;
1546 }
1547
1548 return 0;
d6b0e80f
AC
1549 }
1550
3d799a95
DJ
1551 if (event == PTRACE_EVENT_EXEC)
1552 {
1553 ourstatus->kind = TARGET_WAITKIND_EXECD;
1554 ourstatus->value.execd_pathname
6d8fd2b7 1555 = xstrdup (linux_child_pid_to_exec_file (pid));
3d799a95
DJ
1556
1557 if (linux_parent_pid)
1558 {
1559 detach_breakpoints (linux_parent_pid);
1560 ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
1561
1562 linux_parent_pid = 0;
1563 }
1564
1565 return 0;
1566 }
1567
1568 internal_error (__FILE__, __LINE__,
1569 _("unknown ptrace event %d"), event);
d6b0e80f
AC
1570}
1571
1572/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1573 exited. */
1574
1575static int
1576wait_lwp (struct lwp_info *lp)
1577{
1578 pid_t pid;
1579 int status;
1580 int thread_dead = 0;
1581
1582 gdb_assert (!lp->stopped);
1583 gdb_assert (lp->status == 0);
1584
58aecb61 1585 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
d6b0e80f
AC
1586 if (pid == -1 && errno == ECHILD)
1587 {
58aecb61 1588 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
d6b0e80f
AC
1589 if (pid == -1 && errno == ECHILD)
1590 {
1591 /* The thread has previously exited. We need to delete it
1592 now because, for some vendor 2.4 kernels with NPTL
1593 support backported, there won't be an exit event unless
1594 it is the main thread. 2.6 kernels will report an exit
1595 event for each thread that exits, as expected. */
1596 thread_dead = 1;
1597 if (debug_linux_nat)
1598 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
1599 target_pid_to_str (lp->ptid));
1600 }
1601 }
1602
1603 if (!thread_dead)
1604 {
1605 gdb_assert (pid == GET_LWP (lp->ptid));
1606
1607 if (debug_linux_nat)
1608 {
1609 fprintf_unfiltered (gdb_stdlog,
1610 "WL: waitpid %s received %s\n",
1611 target_pid_to_str (lp->ptid),
1612 status_to_str (status));
1613 }
1614 }
1615
1616 /* Check if the thread has exited. */
1617 if (WIFEXITED (status) || WIFSIGNALED (status))
1618 {
1619 thread_dead = 1;
1620 if (debug_linux_nat)
1621 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
1622 target_pid_to_str (lp->ptid));
1623 }
1624
1625 if (thread_dead)
1626 {
e26af52f 1627 exit_lwp (lp);
d6b0e80f
AC
1628 return 0;
1629 }
1630
1631 gdb_assert (WIFSTOPPED (status));
1632
1633 /* Handle GNU/Linux's extended waitstatus for trace events. */
1634 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1635 {
1636 if (debug_linux_nat)
1637 fprintf_unfiltered (gdb_stdlog,
1638 "WL: Handling extended status 0x%06x\n",
1639 status);
3d799a95 1640 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
1641 return wait_lwp (lp);
1642 }
1643
1644 return status;
1645}
1646
9f0bdab8
DJ
1647/* Save the most recent siginfo for LP. This is currently only called
1648 for SIGTRAP; some ports use the si_addr field for
1649 target_stopped_data_address. In the future, it may also be used to
1650 restore the siginfo of requeued signals. */
1651
1652static void
1653save_siginfo (struct lwp_info *lp)
1654{
1655 errno = 0;
1656 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
1657 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
1658
1659 if (errno != 0)
1660 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1661}
1662
d6b0e80f
AC
1663/* Send a SIGSTOP to LP. */
1664
1665static int
1666stop_callback (struct lwp_info *lp, void *data)
1667{
1668 if (!lp->stopped && !lp->signalled)
1669 {
1670 int ret;
1671
1672 if (debug_linux_nat)
1673 {
1674 fprintf_unfiltered (gdb_stdlog,
1675 "SC: kill %s **<SIGSTOP>**\n",
1676 target_pid_to_str (lp->ptid));
1677 }
1678 errno = 0;
1679 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
1680 if (debug_linux_nat)
1681 {
1682 fprintf_unfiltered (gdb_stdlog,
1683 "SC: lwp kill %d %s\n",
1684 ret,
1685 errno ? safe_strerror (errno) : "ERRNO-OK");
1686 }
1687
1688 lp->signalled = 1;
1689 gdb_assert (lp->status == 0);
1690 }
1691
1692 return 0;
1693}
1694
1695/* Wait until LP is stopped. If DATA is non-null it is interpreted as
1696 a pointer to a set of signals to be flushed immediately. */
1697
1698static int
1699stop_wait_callback (struct lwp_info *lp, void *data)
1700{
1701 sigset_t *flush_mask = data;
1702
1703 if (!lp->stopped)
1704 {
1705 int status;
1706
1707 status = wait_lwp (lp);
1708 if (status == 0)
1709 return 0;
1710
1711 /* Ignore any signals in FLUSH_MASK. */
1712 if (flush_mask && sigismember (flush_mask, WSTOPSIG (status)))
1713 {
1714 if (!lp->signalled)
1715 {
1716 lp->stopped = 1;
1717 return 0;
1718 }
1719
1720 errno = 0;
1721 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1722 if (debug_linux_nat)
1723 fprintf_unfiltered (gdb_stdlog,
1724 "PTRACE_CONT %s, 0, 0 (%s)\n",
1725 target_pid_to_str (lp->ptid),
1726 errno ? safe_strerror (errno) : "OK");
1727
1728 return stop_wait_callback (lp, flush_mask);
1729 }
1730
1731 if (WSTOPSIG (status) != SIGSTOP)
1732 {
1733 if (WSTOPSIG (status) == SIGTRAP)
1734 {
1735 /* If a LWP other than the LWP that we're reporting an
1736 event for has hit a GDB breakpoint (as opposed to
1737 some random trap signal), then just arrange for it to
1738 hit it again later. We don't keep the SIGTRAP status
1739 and don't forward the SIGTRAP signal to the LWP. We
1740 will handle the current event, eventually we will
1741 resume all LWPs, and this one will get its breakpoint
1742 trap again.
1743
1744 If we do not do this, then we run the risk that the
1745 user will delete or disable the breakpoint, but the
1746 thread will have already tripped on it. */
1747
9f0bdab8
DJ
1748 /* Save the trap's siginfo in case we need it later. */
1749 save_siginfo (lp);
1750
d6b0e80f
AC
1751 /* Now resume this LWP and get the SIGSTOP event. */
1752 errno = 0;
1753 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1754 if (debug_linux_nat)
1755 {
1756 fprintf_unfiltered (gdb_stdlog,
1757 "PTRACE_CONT %s, 0, 0 (%s)\n",
1758 target_pid_to_str (lp->ptid),
1759 errno ? safe_strerror (errno) : "OK");
1760
1761 fprintf_unfiltered (gdb_stdlog,
1762 "SWC: Candidate SIGTRAP event in %s\n",
1763 target_pid_to_str (lp->ptid));
1764 }
710151dd
PA
1765 /* Hold this event/waitstatus while we check to see if
1766 there are any more (we still want to get that SIGSTOP). */
d6b0e80f 1767 stop_wait_callback (lp, data);
710151dd
PA
1768
1769 if (target_can_async_p ())
d6b0e80f 1770 {
710151dd
PA
1771 /* Don't leave a pending wait status in async mode.
1772 Retrigger the breakpoint. */
1773 if (!cancel_breakpoint (lp))
d6b0e80f 1774 {
710151dd
PA
1775 /* There was no gdb breakpoint set at pc. Put
1776 the event back in the queue. */
1777 if (debug_linux_nat)
1778 fprintf_unfiltered (gdb_stdlog,
1779 "SWC: kill %s, %s\n",
1780 target_pid_to_str (lp->ptid),
1781 status_to_str ((int) status));
1782 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
1783 }
1784 }
1785 else
1786 {
1787 /* Hold the SIGTRAP for handling by
1788 linux_nat_wait. */
1789 /* If there's another event, throw it back into the
1790 queue. */
1791 if (lp->status)
1792 {
1793 if (debug_linux_nat)
1794 fprintf_unfiltered (gdb_stdlog,
1795 "SWC: kill %s, %s\n",
1796 target_pid_to_str (lp->ptid),
1797 status_to_str ((int) status));
1798 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
d6b0e80f 1799 }
710151dd
PA
1800 /* Save the sigtrap event. */
1801 lp->status = status;
d6b0e80f 1802 }
d6b0e80f
AC
1803 return 0;
1804 }
1805 else
1806 {
1807 /* The thread was stopped with a signal other than
1808 SIGSTOP, and didn't accidentally trip a breakpoint. */
1809
1810 if (debug_linux_nat)
1811 {
1812 fprintf_unfiltered (gdb_stdlog,
1813 "SWC: Pending event %s in %s\n",
1814 status_to_str ((int) status),
1815 target_pid_to_str (lp->ptid));
1816 }
1817 /* Now resume this LWP and get the SIGSTOP event. */
1818 errno = 0;
1819 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1820 if (debug_linux_nat)
1821 fprintf_unfiltered (gdb_stdlog,
1822 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
1823 target_pid_to_str (lp->ptid),
1824 errno ? safe_strerror (errno) : "OK");
1825
1826 /* Hold this event/waitstatus while we check to see if
1827 there are any more (we still want to get that SIGSTOP). */
1828 stop_wait_callback (lp, data);
710151dd
PA
1829
1830 /* If the lp->status field is still empty, use it to
1831 hold this event. If not, then this event must be
1832 returned to the event queue of the LWP. */
1833 if (lp->status || target_can_async_p ())
d6b0e80f
AC
1834 {
1835 if (debug_linux_nat)
1836 {
1837 fprintf_unfiltered (gdb_stdlog,
1838 "SWC: kill %s, %s\n",
1839 target_pid_to_str (lp->ptid),
1840 status_to_str ((int) status));
1841 }
1842 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
1843 }
710151dd
PA
1844 else
1845 lp->status = status;
d6b0e80f
AC
1846 return 0;
1847 }
1848 }
1849 else
1850 {
1851 /* We caught the SIGSTOP that we intended to catch, so
1852 there's no SIGSTOP pending. */
1853 lp->stopped = 1;
1854 lp->signalled = 0;
1855 }
1856 }
1857
1858 return 0;
1859}
1860
1861/* Check whether PID has pending signals that are in FLUSH_MASK and are
1862   neither blocked nor ignored; set them in PENDING and return 1 if so, else 0. */
1863
1864static int
1865linux_nat_has_pending (int pid, sigset_t *pending, sigset_t *flush_mask)
1866{
1867 sigset_t blocked, ignored;
1868 int i;
1869
1870 linux_proc_pending_signals (pid, pending, &blocked, &ignored);
1871
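  /* With no flush mask there is nothing we are allowed to flush, so
     report that no flushable signals are pending.  */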
1872 if (!flush_mask)
1873 return 0;
1874
1875 for (i = 1; i < NSIG; i++)
1876 if (sigismember (pending, i))
1877 if (!sigismember (flush_mask, i)
1878 || sigismember (&blocked, i)
1879 || sigismember (&ignored, i))
1880 sigdelset (pending, i);
1881
1882 if (sigisemptyset (pending))
1883 return 0;
1884
1885 return 1;
1886}
1887
1888/* DATA is interpreted as a mask of signals to flush. If LP has
1889 signals pending, and they are all in the flush mask, then arrange
1890 to flush them. LP should be stopped, as should all other threads
1891 it might share a signal queue with. */
1892
1893static int
1894flush_callback (struct lwp_info *lp, void *data)
1895{
1896 sigset_t *flush_mask = data;
1897 sigset_t pending, intersection, blocked, ignored;
1898 int pid, status;
1899
1900 /* Normally, when an LWP exits, it is removed from the LWP list. The
1901 last LWP isn't removed till later, however. So if there is only
1902 one LWP on the list, make sure it's alive. */
1903 if (lwp_list == lp && lp->next == NULL)
1904 if (!linux_nat_thread_alive (lp->ptid))
1905 return 0;
1906
1907 /* Just because the LWP is stopped doesn't mean that new signals
1908 can't arrive from outside, so this function must be careful of
1909 race conditions. However, because all threads are stopped, we
1910 can assume that the pending mask will not shrink unless we resume
1911 the LWP, and that it will then get another signal. We can't
1912 control which one, however. */
1913
1914 if (lp->status)
1915 {
1916 if (debug_linux_nat)
a3f17187 1917 printf_unfiltered (_("FC: LP has pending status %06x\n"), lp->status);
d6b0e80f
AC
1918 if (WIFSTOPPED (lp->status) && sigismember (flush_mask, WSTOPSIG (lp->status)))
1919 lp->status = 0;
1920 }
1921
3d799a95
DJ
1922 /* While there is a pending signal we would like to flush, continue
1923 the inferior and collect another signal. But if there's already
1924 a saved status that we don't want to flush, we can't resume the
1925 inferior - if it stopped for some other reason we wouldn't have
1926 anywhere to save the new status. In that case, we must leave the
1927 signal unflushed (and possibly generate an extra SIGINT stop).
1928 That's much less bad than losing a signal. */
1929 while (lp->status == 0
1930 && linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask))
d6b0e80f
AC
1931 {
1932 int ret;
1933
1934 errno = 0;
1935 ret = ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1936 if (debug_linux_nat)
1937 fprintf_unfiltered (gdb_stderr,
1938 "FC: Sent PTRACE_CONT, ret %d %d\n", ret, errno);
1939
1940 lp->stopped = 0;
1941 stop_wait_callback (lp, flush_mask);
1942 if (debug_linux_nat)
1943 fprintf_unfiltered (gdb_stderr,
1944 "FC: Wait finished; saved status is %d\n",
1945 lp->status);
1946 }
1947
1948 return 0;
1949}
1950
1951/* Return non-zero if LP has a wait status pending. */
1952
1953static int
1954status_callback (struct lwp_info *lp, void *data)
1955{
1956 /* Only report a pending wait status if we pretend that this has
1957 indeed been resumed. */
1958 return (lp->status != 0 && lp->resumed);
1959}
1960
1961/* Return non-zero if LP isn't stopped. */
1962
1963static int
1964running_callback (struct lwp_info *lp, void *data)
1965{
1966 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
1967}
1968
1969/* Count the LWPs that have had events. */
1970
1971static int
1972count_events_callback (struct lwp_info *lp, void *data)
1973{
1974 int *count = data;
1975
1976 gdb_assert (count != NULL);
1977
1978 /* Count only LWPs that have a SIGTRAP event pending. */
1979 if (lp->status != 0
1980 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1981 (*count)++;
1982
1983 return 0;
1984}
1985
1986/* Select the LWP (if any) that is currently being single-stepped. */
1987
1988static int
1989select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
1990{
1991 if (lp->step && lp->status != 0)
1992 return 1;
1993 else
1994 return 0;
1995}
1996
1997/* Select the Nth LWP that has had a SIGTRAP event. */
1998
1999static int
2000select_event_lwp_callback (struct lwp_info *lp, void *data)
2001{
2002 int *selector = data;
2003
2004 gdb_assert (selector != NULL);
2005
2006 /* Select only LWPs that have a SIGTRAP event pending. */
2007 if (lp->status != 0
2008 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2009 if ((*selector)-- == 0)
2010 return 1;
2011
2012 return 0;
2013}
2014
710151dd
PA
2015static int
2016cancel_breakpoint (struct lwp_info *lp)
2017{
2018 /* Arrange for a breakpoint to be hit again later. We don't keep
2019 the SIGTRAP status and don't forward the SIGTRAP signal to the
2020 LWP. We will handle the current event, eventually we will resume
2021 this LWP, and this breakpoint will trap again.
2022
2023 If we do not do this, then we run the risk that the user will
2024 delete or disable the breakpoint, but the LWP will have already
2025 tripped on it. */
2026
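  /* On targets where the reported PC is past the breakpoint
     instruction (gdbarch_decr_pc_after_break is non-zero, e.g. x86),
     subtract that offset before checking for an inserted breakpoint.  */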
2027 if (breakpoint_inserted_here_p (read_pc_pid (lp->ptid) -
2028 gdbarch_decr_pc_after_break
2029 (current_gdbarch)))
2030 {
2031 if (debug_linux_nat)
2032 fprintf_unfiltered (gdb_stdlog,
2033 "CB: Push back breakpoint for %s\n",
2034 target_pid_to_str (lp->ptid));
2035
2036 /* Back up the PC if necessary. */
2037 if (gdbarch_decr_pc_after_break (current_gdbarch))
2038 write_pc_pid (read_pc_pid (lp->ptid) - gdbarch_decr_pc_after_break
2039 (current_gdbarch),
2040 lp->ptid);
2041 return 1;
2042 }
2043 return 0;
2044}
2045
d6b0e80f
AC
2046static int
2047cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2048{
2049 struct lwp_info *event_lp = data;
2050
2051 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2052 if (lp == event_lp)
2053 return 0;
2054
2055 /* If a LWP other than the LWP that we're reporting an event for has
2056 hit a GDB breakpoint (as opposed to some random trap signal),
2057 then just arrange for it to hit it again later. We don't keep
2058 the SIGTRAP status and don't forward the SIGTRAP signal to the
2059 LWP. We will handle the current event, eventually we will resume
2060 all LWPs, and this one will get its breakpoint trap again.
2061
2062 If we do not do this, then we run the risk that the user will
2063 delete or disable the breakpoint, but the LWP will have already
2064 tripped on it. */
2065
2066 if (lp->status != 0
2067 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
710151dd
PA
2068 && cancel_breakpoint (lp))
2069 /* Throw away the SIGTRAP. */
2070 lp->status = 0;
d6b0e80f
AC
2071
2072 return 0;
2073}
2074
2075/* Select one LWP out of those that have events pending. */
2076
2077static void
2078select_event_lwp (struct lwp_info **orig_lp, int *status)
2079{
2080 int num_events = 0;
2081 int random_selector;
2082 struct lwp_info *event_lp;
2083
ac264b3b 2084 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2085 (*orig_lp)->status = *status;
2086
2087 /* Give preference to any LWP that is being single-stepped. */
2088 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
2089 if (event_lp != NULL)
2090 {
2091 if (debug_linux_nat)
2092 fprintf_unfiltered (gdb_stdlog,
2093 "SEL: Select single-step %s\n",
2094 target_pid_to_str (event_lp->ptid));
2095 }
2096 else
2097 {
2098 /* No single-stepping LWP. Select one at random, out of those
2099 which have had SIGTRAP events. */
2100
2101 /* First see how many SIGTRAP events we have. */
2102 iterate_over_lwps (count_events_callback, &num_events);
2103
2104 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
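      /* Scale rand () into a uniform index in [0, num_events).  */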
2105 random_selector = (int)
2106 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2107
2108 if (debug_linux_nat && num_events > 1)
2109 fprintf_unfiltered (gdb_stdlog,
2110 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2111 num_events, random_selector);
2112
2113 event_lp = iterate_over_lwps (select_event_lwp_callback,
2114 &random_selector);
2115 }
2116
2117 if (event_lp != NULL)
2118 {
2119 /* Switch the event LWP. */
2120 *orig_lp = event_lp;
2121 *status = event_lp->status;
2122 }
2123
2124 /* Flush the wait status for the event LWP. */
2125 (*orig_lp)->status = 0;
2126}
2127
2128/* Return non-zero if LP has been resumed. */
2129
2130static int
2131resumed_callback (struct lwp_info *lp, void *data)
2132{
2133 return lp->resumed;
2134}
2135
d6b0e80f
AC
2136/* Stop an active thread, verify it still exists, then resume it. */
2137
2138static int
2139stop_and_resume_callback (struct lwp_info *lp, void *data)
2140{
2141 struct lwp_info *ptr;
2142
2143 if (!lp->stopped && !lp->signalled)
2144 {
2145 stop_callback (lp, NULL);
2146 stop_wait_callback (lp, NULL);
2147 /* Resume if the lwp still exists. */
2148 for (ptr = lwp_list; ptr; ptr = ptr->next)
2149 if (lp == ptr)
2150 {
2151 resume_callback (lp, NULL);
2152 resume_set_callback (lp, NULL);
2153 }
2154 }
2155 return 0;
2156}
2157
02f3fc28 2158/* Check if we should go on and pass this event to common code.
fa2c6a57 2159   If so, return the affected LWP; otherwise return NULL. */
02f3fc28
PA
2160static struct lwp_info *
2161linux_nat_filter_event (int lwpid, int status, int options)
2162{
2163 struct lwp_info *lp;
2164
2165 lp = find_lwp_pid (pid_to_ptid (lwpid));
2166
2167 /* Check for stop events reported by a process we didn't already
2168 know about - anything not already in our LWP list.
2169
2170 If we're expecting to receive stopped processes after
2171 fork, vfork, and clone events, then we'll just add the
2172 new one to our list and go back to waiting for the event
2173 to be reported - the stopped process might be returned
2174 from waitpid before or after the event is. */
2175 if (WIFSTOPPED (status) && !lp)
2176 {
2177 linux_record_stopped_pid (lwpid, status);
2178 return NULL;
2179 }
2180
2181 /* Make sure we don't report an event for the exit of an LWP not in
2182 our list, i.e. not part of the current process. This can happen
2183 if we detach from a program we originally forked and then it
2184 exits. */
2185 if (!WIFSTOPPED (status) && !lp)
2186 return NULL;
2187
2188 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2189 CLONE_PTRACE processes which do not use the thread library -
2190 otherwise we wouldn't find the new LWP this way. That doesn't
2191 currently work, and the following code is currently unreachable
2192 due to the two blocks above. If it's fixed some day, this code
2193 should be broken out into a function so that we can also pick up
2194 LWPs from the new interface. */
2195 if (!lp)
2196 {
2197 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
2198 if (options & __WCLONE)
2199 lp->cloned = 1;
2200
2201 gdb_assert (WIFSTOPPED (status)
2202 && WSTOPSIG (status) == SIGSTOP);
2203 lp->signalled = 1;
2204
2205 if (!in_thread_list (inferior_ptid))
2206 {
2207 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2208 GET_PID (inferior_ptid));
2209 add_thread (inferior_ptid);
2210 }
2211
2212 add_thread (lp->ptid);
2213 }
2214
2215 /* Save the trap's siginfo in case we need it later. */
2216 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2217 save_siginfo (lp);
2218
2219 /* Handle GNU/Linux's extended waitstatus for trace events. */
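  /* ptrace reports fork/vfork/clone/exec events as a SIGTRAP stop
     with the PTRACE_EVENT_* code in bits 16 and up of the wait
     status, hence the status >> 16 test below.  */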
2220 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2221 {
2222 if (debug_linux_nat)
2223 fprintf_unfiltered (gdb_stdlog,
2224 "LLW: Handling extended status 0x%06x\n",
2225 status);
2226 if (linux_handle_extended_wait (lp, status, 0))
2227 return NULL;
2228 }
2229
2230 /* Check if the thread has exited. */
2231 if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
2232 {
2233 /* If this is the main thread, we must stop all threads and
2234 verify if they are still alive. This is because in the nptl
2235 thread model, there is no signal issued for exiting LWPs
2236 other than the main thread. We only get the main thread exit
2237 signal once all child threads have already exited. If we
2238 stop all the threads and use the stop_wait_callback to check
2239 if they have exited we can determine whether this signal
2240 should be ignored or whether it means the end of the debugged
2241 application, regardless of which threading model is being
2242 used. */
2243 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
2244 {
2245 lp->stopped = 1;
2246 iterate_over_lwps (stop_and_resume_callback, NULL);
2247 }
2248
2249 if (debug_linux_nat)
2250 fprintf_unfiltered (gdb_stdlog,
2251 "LLW: %s exited.\n",
2252 target_pid_to_str (lp->ptid));
2253
2254 exit_lwp (lp);
2255
2256 /* If there is at least one more LWP, then the exit signal was
2257 not the end of the debugged application and should be
2258 ignored. */
2259 if (num_lwps > 0)
2260 {
2261 /* Make sure there is at least one thread running. */
2262 gdb_assert (iterate_over_lwps (running_callback, NULL));
2263
2264 /* Discard the event. */
2265 return NULL;
2266 }
2267 }
2268
2269 /* Check if the current LWP has previously exited. In the nptl
2270 thread model, LWPs other than the main thread do not issue
2271 signals when they exit so we must check whenever the thread has
2272 stopped. A similar check is made in stop_wait_callback(). */
2273 if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
2274 {
2275 if (debug_linux_nat)
2276 fprintf_unfiltered (gdb_stdlog,
2277 "LLW: %s exited.\n",
2278 target_pid_to_str (lp->ptid));
2279
2280 exit_lwp (lp);
2281
2282 /* Make sure there is at least one thread running. */
2283 gdb_assert (iterate_over_lwps (running_callback, NULL));
2284
2285 /* Discard the event. */
2286 return NULL;
2287 }
2288
2289 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2290 an attempt to stop an LWP. */
2291 if (lp->signalled
2292 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2293 {
2294 if (debug_linux_nat)
2295 fprintf_unfiltered (gdb_stdlog,
2296 "LLW: Delayed SIGSTOP caught for %s.\n",
2297 target_pid_to_str (lp->ptid));
2298
2299 /* This is a delayed SIGSTOP. */
2300 lp->signalled = 0;
2301
2302 registers_changed ();
2303
2304 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2305 lp->step, TARGET_SIGNAL_0);
2306 if (debug_linux_nat)
2307 fprintf_unfiltered (gdb_stdlog,
2308 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2309 lp->step ?
2310 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2311 target_pid_to_str (lp->ptid));
2312
2313 lp->stopped = 0;
2314 gdb_assert (lp->resumed);
2315
2316 /* Discard the event. */
2317 return NULL;
2318 }
2319
2320 /* An interesting event. */
2321 gdb_assert (lp);
2322 return lp;
2323}
2324
b84876c2
PA
2325/* Get the events stored in the pipe into the local queue, so they are
2326 accessible to queued_waitpid. We need to do this, since it is not
2327 always the case that the event at the head of the pipe is the event
2328 we want. */
2329
2330static void
2331pipe_to_local_event_queue (void)
2332{
2333 if (debug_linux_nat_async)
2334 fprintf_unfiltered (gdb_stdlog,
2335 "PTLEQ: linux_nat_num_queued_events(%d)\n",
2336 linux_nat_num_queued_events);
2337 while (linux_nat_num_queued_events)
2338 {
2339 int lwpid, status, options;
b84876c2 2340 lwpid = linux_nat_event_pipe_pop (&status, &options);
b84876c2
PA
2341 gdb_assert (lwpid > 0);
2342 push_waitpid (lwpid, status, options);
2343 }
2344}
2345
2346/* Get the unprocessed events stored in the local queue back into the
2347 pipe, so the event loop realizes there's something else to
2348 process. */
2349
2350static void
2351local_event_queue_to_pipe (void)
2352{
2353 struct waitpid_result *w = waitpid_queue;
2354 while (w)
2355 {
2356 struct waitpid_result *next = w->next;
2357 linux_nat_event_pipe_push (w->pid,
2358 w->status,
2359 w->options);
2360 xfree (w);
2361 w = next;
2362 }
2363 waitpid_queue = NULL;
2364
2365 if (debug_linux_nat_async)
2366 fprintf_unfiltered (gdb_stdlog,
2367 "LEQTP: linux_nat_num_queued_events(%d)\n",
2368 linux_nat_num_queued_events);
2369}
2370
d6b0e80f
AC
2371static ptid_t
2372linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
2373{
2374 struct lwp_info *lp = NULL;
2375 int options = 0;
2376 int status = 0;
2377 pid_t pid = PIDGET (ptid);
2378 sigset_t flush_mask;
2379
b84876c2
PA
2380 if (debug_linux_nat_async)
2381 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
2382
f973ed9c
DJ
2383 /* The first time we get here after starting a new inferior, we may
2384 not have added it to the LWP list yet - this is the earliest
2385 moment at which we know its PID. */
2386 if (num_lwps == 0)
2387 {
2388 gdb_assert (!is_lwp (inferior_ptid));
2389
2390 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2391 GET_PID (inferior_ptid));
2392 lp = add_lwp (inferior_ptid);
2393 lp->resumed = 1;
403fe197
PA
2394 /* Add the main thread to GDB's thread list. */
2395 add_thread_silent (lp->ptid);
f973ed9c
DJ
2396 }
2397
d6b0e80f
AC
2398 sigemptyset (&flush_mask);
2399
b84876c2
PA
2400 if (target_can_async_p ())
2401 /* Block events while we're here. */
2402 target_async (NULL, 0);
d6b0e80f
AC
2403
2404retry:
2405
f973ed9c
DJ
2406 /* Make sure there is at least one LWP that has been resumed. */
2407 gdb_assert (iterate_over_lwps (resumed_callback, NULL));
d6b0e80f
AC
2408
2409 /* First check if there is a LWP with a wait status pending. */
2410 if (pid == -1)
2411 {
2412 /* Any LWP that's been resumed will do. */
2413 lp = iterate_over_lwps (status_callback, NULL);
2414 if (lp)
2415 {
710151dd
PA
2416 if (target_can_async_p ())
2417 internal_error (__FILE__, __LINE__,
2418 "Found an LWP with a pending status in async mode.");
2419
d6b0e80f
AC
2420 status = lp->status;
2421 lp->status = 0;
2422
2423 if (debug_linux_nat && status)
2424 fprintf_unfiltered (gdb_stdlog,
2425 "LLW: Using pending wait status %s for %s.\n",
2426 status_to_str (status),
2427 target_pid_to_str (lp->ptid));
2428 }
2429
b84876c2 2430 /* But if we don't find one, we'll have to wait, and check both
d6b0e80f
AC
2431 cloned and uncloned processes. We start with the cloned
2432 processes. */
2433 options = __WCLONE | WNOHANG;
2434 }
2435 else if (is_lwp (ptid))
2436 {
2437 if (debug_linux_nat)
2438 fprintf_unfiltered (gdb_stdlog,
2439 "LLW: Waiting for specific LWP %s.\n",
2440 target_pid_to_str (ptid));
2441
2442 /* We have a specific LWP to check. */
2443 lp = find_lwp_pid (ptid);
2444 gdb_assert (lp);
2445 status = lp->status;
2446 lp->status = 0;
2447
2448 if (debug_linux_nat && status)
2449 fprintf_unfiltered (gdb_stdlog,
2450 "LLW: Using pending wait status %s for %s.\n",
2451 status_to_str (status),
2452 target_pid_to_str (lp->ptid));
2453
2454 /* If we have to wait, take into account whether PID is a cloned
2455 process or not. And we have to convert it to something that
2456 the layer beneath us can understand. */
2457 options = lp->cloned ? __WCLONE : 0;
2458 pid = GET_LWP (ptid);
2459 }
2460
2461 if (status && lp->signalled)
2462 {
2463 /* A pending SIGSTOP may interfere with the normal stream of
2464 events. In a typical case where interference is a problem,
2465 we have a SIGSTOP signal pending for LWP A while
2466 single-stepping it, encounter an event in LWP B, and take the
2467 pending SIGSTOP while trying to stop LWP A. After processing
2468 the event in LWP B, LWP A is continued, and we'll never see
2469 the SIGTRAP associated with the last time we were
2470 single-stepping LWP A. */
2471
2472 /* Resume the thread. It should halt immediately returning the
2473 pending SIGSTOP. */
2474 registers_changed ();
10d6c8cd
DJ
2475 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2476 lp->step, TARGET_SIGNAL_0);
d6b0e80f
AC
2477 if (debug_linux_nat)
2478 fprintf_unfiltered (gdb_stdlog,
2479 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
2480 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2481 target_pid_to_str (lp->ptid));
2482 lp->stopped = 0;
2483 gdb_assert (lp->resumed);
2484
2485 /* This should catch the pending SIGSTOP. */
2486 stop_wait_callback (lp, NULL);
2487 }
2488
b84876c2
PA
2489 if (!target_can_async_p ())
2490 {
2491 /* Causes SIGINT to be passed on to the attached process. */
2492 set_sigint_trap ();
2493 set_sigio_trap ();
2494 }
d6b0e80f
AC
2495
2496 while (status == 0)
2497 {
2498 pid_t lwpid;
2499
b84876c2
PA
2500 if (target_can_async_p ())
2501 /* In async mode, don't ever block. Only look at the locally
2502 queued events. */
2503 lwpid = queued_waitpid (pid, &status, options);
2504 else
2505 lwpid = my_waitpid (pid, &status, options);
2506
d6b0e80f
AC
2507 if (lwpid > 0)
2508 {
2509 gdb_assert (pid == -1 || lwpid == pid);
2510
2511 if (debug_linux_nat)
2512 {
2513 fprintf_unfiltered (gdb_stdlog,
2514 "LLW: waitpid %ld received %s\n",
2515 (long) lwpid, status_to_str (status));
2516 }
2517
02f3fc28 2518 lp = linux_nat_filter_event (lwpid, status, options);
d6b0e80f
AC
2519 if (!lp)
2520 {
02f3fc28 2521 /* A discarded event. */
d6b0e80f
AC
2522 status = 0;
2523 continue;
2524 }
2525
2526 break;
2527 }
2528
2529 if (pid == -1)
2530 {
2531 /* Alternate between checking cloned and uncloned processes. */
2532 options ^= __WCLONE;
2533
b84876c2
PA
2534 /* And every time we have checked both:
2535 In async mode, return to event loop;
2536 In sync mode, suspend waiting for a SIGCHLD signal. */
d6b0e80f 2537 if (options & __WCLONE)
b84876c2
PA
2538 {
2539 if (target_can_async_p ())
2540 {
2541 /* No interesting event. */
2542 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2543
2544 /* Get ready for the next event. */
2545 target_async (inferior_event_handler, 0);
2546
2547 if (debug_linux_nat_async)
2548 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
2549
2550 return minus_one_ptid;
2551 }
2552
2553 sigsuspend (&suspend_mask);
2554 }
d6b0e80f
AC
2555 }
2556
2557 /* We shouldn't end up here unless we want to try again. */
2558 gdb_assert (status == 0);
2559 }
2560
b84876c2
PA
2561 if (!target_can_async_p ())
2562 {
2563 clear_sigio_trap ();
2564 clear_sigint_trap ();
2565 }
d6b0e80f
AC
2566
2567 gdb_assert (lp);
2568
2569 /* Don't report signals that GDB isn't interested in, such as
2570 signals that are neither printed nor stopped upon. Stopping all
2571 threads can be a bit time-consuming so if we want decent
2572 performance with heavily multi-threaded programs, especially when
2573 they're using a high frequency timer, we'd better avoid it if we
2574 can. */
2575
2576 if (WIFSTOPPED (status))
2577 {
2578 int signo = target_signal_from_host (WSTOPSIG (status));
2579
d539ed7e
UW
2580 /* If we get a signal while single-stepping, we may need special
2581 care, e.g. to skip the signal handler. Defer to common code. */
2582 if (!lp->step
2583 && signal_stop_state (signo) == 0
d6b0e80f
AC
2584 && signal_print_state (signo) == 0
2585 && signal_pass_state (signo) == 1)
2586 {
2587 /* FIXME: kettenis/2001-06-06: Should we resume all threads
2588 here? It is not clear we should. GDB may not expect
2589 other threads to run. On the other hand, not resuming
2590 newly attached threads may cause an unwanted delay in
2591 getting them running. */
2592 registers_changed ();
10d6c8cd
DJ
2593 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2594 lp->step, signo);
d6b0e80f
AC
2595 if (debug_linux_nat)
2596 fprintf_unfiltered (gdb_stdlog,
2597 "LLW: %s %s, %s (preempt 'handle')\n",
2598 lp->step ?
2599 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2600 target_pid_to_str (lp->ptid),
2601 signo ? strsignal (signo) : "0");
2602 lp->stopped = 0;
2603 status = 0;
2604 goto retry;
2605 }
2606
2607 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
2608 {
2609 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2610 forwarded to the entire process group, that is, all LWP's
2611 will receive it. Since we only want to report it once,
2612 we try to flush it from all LWPs except this one. */
2613 sigaddset (&flush_mask, SIGINT);
2614 }
2615 }
2616
2617 /* This LWP is stopped now. */
2618 lp->stopped = 1;
2619
2620 if (debug_linux_nat)
2621 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
2622 status_to_str (status), target_pid_to_str (lp->ptid));
2623
2624 /* Now stop all other LWPs ... */
2625 iterate_over_lwps (stop_callback, NULL);
2626
2627 /* ... and wait until all of them have reported back that they're no
2628 longer running. */
2629 iterate_over_lwps (stop_wait_callback, &flush_mask);
2630 iterate_over_lwps (flush_callback, &flush_mask);
2631
2632 /* If we're not waiting for a specific LWP, choose an event LWP from
2633 among those that have had events. Giving equal priority to all
2634 LWPs that have had events helps prevent starvation. */
2635 if (pid == -1)
2636 select_event_lwp (&lp, &status);
2637
2638 /* Now that we've selected our final event LWP, cancel any
2639 breakpoints in other LWPs that have hit a GDB breakpoint. See
2640 the comment in cancel_breakpoints_callback to find out why. */
2641 iterate_over_lwps (cancel_breakpoints_callback, lp);
2642
d6b0e80f
AC
2643 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2644 {
f973ed9c 2645 trap_ptid = lp->ptid;
d6b0e80f
AC
2646 if (debug_linux_nat)
2647 fprintf_unfiltered (gdb_stdlog,
2648 "LLW: trap_ptid is %s.\n",
2649 target_pid_to_str (trap_ptid));
2650 }
2651 else
2652 trap_ptid = null_ptid;
2653
2654 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2655 {
2656 *ourstatus = lp->waitstatus;
2657 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
2658 }
2659 else
2660 store_waitstatus (ourstatus, status);
2661
b84876c2
PA
2662 /* Get ready for the next event. */
2663 if (target_can_async_p ())
2664 target_async (inferior_event_handler, 0);
2665
2666 if (debug_linux_nat_async)
2667 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
2668
f973ed9c 2669 return lp->ptid;
d6b0e80f
AC
2670}
2671
2672static int
2673kill_callback (struct lwp_info *lp, void *data)
2674{
2675 errno = 0;
2676 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
2677 if (debug_linux_nat)
2678 fprintf_unfiltered (gdb_stdlog,
2679 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
2680 target_pid_to_str (lp->ptid),
2681 errno ? safe_strerror (errno) : "OK");
2682
2683 return 0;
2684}
2685
2686static int
2687kill_wait_callback (struct lwp_info *lp, void *data)
2688{
2689 pid_t pid;
2690
2691 /* We must make sure that there are no pending events (delayed
2692 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
2693 program doesn't interfere with any following debugging session. */
2694
2695 /* For cloned processes we must check both with __WCLONE and
2696 without, since the exit status of a cloned process isn't reported
2697 with __WCLONE. */
2698 if (lp->cloned)
2699 {
2700 do
2701 {
58aecb61 2702 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
e85a822c 2703 if (pid != (pid_t) -1)
d6b0e80f 2704 {
e85a822c
DJ
2705 if (debug_linux_nat)
2706 fprintf_unfiltered (gdb_stdlog,
2707 "KWC: wait %s received unknown.\n",
2708 target_pid_to_str (lp->ptid));
2709 /* The Linux kernel sometimes fails to kill a thread
2710 completely after PTRACE_KILL; that goes from the stop
2711 point in do_fork out to the one in
2712 get_signal_to_deliver and waits again. So kill it
2713 again. */
2714 kill_callback (lp, NULL);
d6b0e80f
AC
2715 }
2716 }
2717 while (pid == GET_LWP (lp->ptid));
2718
2719 gdb_assert (pid == -1 && errno == ECHILD);
2720 }
2721
2722 do
2723 {
58aecb61 2724 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
e85a822c 2725 if (pid != (pid_t) -1)
d6b0e80f 2726 {
e85a822c
DJ
2727 if (debug_linux_nat)
2728 fprintf_unfiltered (gdb_stdlog,
2729 "KWC: wait %s received unk.\n",
2730 target_pid_to_str (lp->ptid));
2731 /* See the call to kill_callback above. */
2732 kill_callback (lp, NULL);
d6b0e80f
AC
2733 }
2734 }
2735 while (pid == GET_LWP (lp->ptid));
2736
2737 gdb_assert (pid == -1 && errno == ECHILD);
2738 return 0;
2739}
2740
2741static void
2742linux_nat_kill (void)
2743{
f973ed9c
DJ
2744 struct target_waitstatus last;
2745 ptid_t last_ptid;
2746 int status;
d6b0e80f 2747
b84876c2
PA
2748 if (target_can_async_p ())
2749 target_async (NULL, 0);
2750
f973ed9c
DJ
2751 /* If we're stopped while forking and we haven't followed yet,
2752 kill the other task. We need to do this first because the
2753 parent will be sleeping if this is a vfork. */
d6b0e80f 2754
f973ed9c 2755 get_last_target_status (&last_ptid, &last);
d6b0e80f 2756
f973ed9c
DJ
2757 if (last.kind == TARGET_WAITKIND_FORKED
2758 || last.kind == TARGET_WAITKIND_VFORKED)
2759 {
2760 ptrace (PT_KILL, last.value.related_pid, 0, 0);
2761 wait (&status);
2762 }
2763
2764 if (forks_exist_p ())
b84876c2
PA
2765 {
2766 linux_fork_killall ();
2767 drain_queued_events (-1);
2768 }
f973ed9c
DJ
2769 else
2770 {
2771 /* Kill all LWPs ... */
2772 iterate_over_lwps (kill_callback, NULL);
2773
2774 /* ... and wait until we've flushed all events. */
2775 iterate_over_lwps (kill_wait_callback, NULL);
2776 }
2777
2778 target_mourn_inferior ();
d6b0e80f
AC
2779}
2780
2781static void
2782linux_nat_mourn_inferior (void)
2783{
2784 trap_ptid = null_ptid;
2785
2786 /* Destroy LWP info; it's no longer valid. */
2787 init_lwp_list ();
2788
f973ed9c 2789 if (! forks_exist_p ())
b84876c2
PA
2790 {
2791 /* Normal case, no other forks available. */
2792 if (target_can_async_p ())
2793 linux_nat_async (NULL, 0);
2794 linux_ops->to_mourn_inferior ();
2795 }
f973ed9c
DJ
2796 else
2797 /* Multi-fork case. The current inferior_ptid has exited, but
2798 there are other viable forks to debug. Delete the exiting
2799 one and context-switch to the first available. */
2800 linux_fork_mourn_inferior ();
d6b0e80f
AC
2801}
2802
10d6c8cd
DJ
2803static LONGEST
2804linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
2805 const char *annex, gdb_byte *readbuf,
2806 const gdb_byte *writebuf,
2807 ULONGEST offset, LONGEST len)
d6b0e80f
AC
2808{
2809 struct cleanup *old_chain = save_inferior_ptid ();
10d6c8cd 2810 LONGEST xfer;
d6b0e80f
AC
2811
2812 if (is_lwp (inferior_ptid))
2813 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
2814
10d6c8cd
DJ
2815 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
2816 offset, len);
d6b0e80f
AC
2817
2818 do_cleanups (old_chain);
2819 return xfer;
2820}
2821
2822static int
2823linux_nat_thread_alive (ptid_t ptid)
2824{
2825 gdb_assert (is_lwp (ptid));
2826
2827 errno = 0;
2828 ptrace (PTRACE_PEEKUSER, GET_LWP (ptid), 0, 0);
2829 if (debug_linux_nat)
2830 fprintf_unfiltered (gdb_stdlog,
2831 "LLTA: PTRACE_PEEKUSER %s, 0, 0 (%s)\n",
2832 target_pid_to_str (ptid),
2833 errno ? safe_strerror (errno) : "OK");
9c0dd46b 2834
155bd5d1
AC
2835 /* Not every Linux kernel implements PTRACE_PEEKUSER. But we can
2836 handle that case gracefully since ptrace will first do a lookup
2837 for the process based upon the passed-in pid. If that fails we
2838 will get either ESRCH or EPERM, otherwise the child exists and
2839 is alive. */
a529be7c 2840 if (errno == ESRCH || errno == EPERM)
d6b0e80f
AC
2841 return 0;
2842
2843 return 1;
2844}
2845
2846static char *
2847linux_nat_pid_to_str (ptid_t ptid)
2848{
2849 static char buf[64];
2850
f973ed9c 2851 if (lwp_list && lwp_list->next && is_lwp (ptid))
d6b0e80f
AC
2852 {
2853 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
2854 return buf;
2855 }
2856
2857 return normal_pid_to_str (ptid);
2858}
2859
d6b0e80f
AC
2860static void
2861sigchld_handler (int signo)
2862{
b84876c2
PA
2863 if (linux_nat_async_enabled
2864 && linux_nat_async_events_enabled
2865 && signo == SIGCHLD)
2866 /* It is *always* a bug to hit this. */
2867 internal_error (__FILE__, __LINE__,
2868 "sigchld_handler called when async events are enabled");
2869
d6b0e80f
AC
2870 /* Do nothing. The only reason for this handler is that it allows
2871 us to use sigsuspend in linux_nat_wait above to wait for the
2872 arrival of a SIGCHLD. */
2873}
2874
dba24537
AC
2875/* Accepts an integer PID; returns a string representing a file that
2876 can be opened to get the symbols for the child process. */
2877
6d8fd2b7
UW
2878static char *
2879linux_child_pid_to_exec_file (int pid)
dba24537
AC
2880{
2881 char *name1, *name2;
2882
2883 name1 = xmalloc (MAXPATHLEN);
2884 name2 = xmalloc (MAXPATHLEN);
2885 make_cleanup (xfree, name1);
2886 make_cleanup (xfree, name2);
2887 memset (name2, 0, MAXPATHLEN);
2888
2889 sprintf (name1, "/proc/%d/exe", pid);
2890 if (readlink (name1, name2, MAXPATHLEN) > 0)
2891 return name2;
2892 else
2893 return name1;
2894}
2895
2896/* Service function for corefiles and info proc. */
2897
2898static int
2899read_mapping (FILE *mapfile,
2900 long long *addr,
2901 long long *endaddr,
2902 char *permissions,
2903 long long *offset,
2904 char *device, long long *inode, char *filename)
2905{
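  /* Each line of /proc/PID/maps has the form
     "start-end perms offset dev inode [pathname]"; parse the fixed
     fields here and pick up any trailing filename below.  */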
2906 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
2907 addr, endaddr, permissions, offset, device, inode);
2908
2e14c2ea
MS
2909 filename[0] = '\0';
2910 if (ret > 0 && ret != EOF)
dba24537
AC
2911 {
2912 /* Eat everything up to EOL for the filename. This will prevent
2913 weird filenames (such as one with embedded whitespace) from
2914 confusing this code. It also makes this code more robust in
2915 respect to annotations the kernel may add after the filename.
2916
2917 Note the filename is used for informational purposes
2918 only. */
2919 ret += fscanf (mapfile, "%[^\n]\n", filename);
2920 }
2e14c2ea 2921
dba24537
AC
2922 return (ret != 0 && ret != EOF);
2923}
2924
2925/* Fills the "to_find_memory_regions" target vector. Lists the memory
2926 regions in the inferior for a corefile. */
2927
2928static int
2929linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
2930 unsigned long,
2931 int, int, int, void *), void *obfd)
2932{
2933 long long pid = PIDGET (inferior_ptid);
2934 char mapsfilename[MAXPATHLEN];
2935 FILE *mapsfile;
2936 long long addr, endaddr, size, offset, inode;
2937 char permissions[8], device[8], filename[MAXPATHLEN];
2938 int read, write, exec;
2939 int ret;
2940
2941 /* Compose the filename for the /proc memory map, and open it. */
2942 sprintf (mapsfilename, "/proc/%lld/maps", pid);
2943 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
8a3fe4f8 2944 error (_("Could not open %s."), mapsfilename);
dba24537
AC
2945
2946 if (info_verbose)
2947 fprintf_filtered (gdb_stdout,
2948 "Reading memory regions from %s\n", mapsfilename);
2949
2950 /* Now iterate until end-of-file. */
2951 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
2952 &offset, &device[0], &inode, &filename[0]))
2953 {
2954 size = endaddr - addr;
2955
2956 /* Get the segment's permissions. */
2957 read = (strchr (permissions, 'r') != 0);
2958 write = (strchr (permissions, 'w') != 0);
2959 exec = (strchr (permissions, 'x') != 0);
2960
2961 if (info_verbose)
2962 {
2963 fprintf_filtered (gdb_stdout,
2964 "Save segment, %lld bytes at 0x%s (%c%c%c)",
2965 size, paddr_nz (addr),
2966 read ? 'r' : ' ',
2967 write ? 'w' : ' ', exec ? 'x' : ' ');
b260b6c1 2968 if (filename[0])
dba24537
AC
2969 fprintf_filtered (gdb_stdout, " for %s", filename);
2970 fprintf_filtered (gdb_stdout, "\n");
2971 }
2972
2973 /* Invoke the callback function to create the corefile
2974 segment. */
2975 func (addr, size, read, write, exec, obfd);
2976 }
2977 fclose (mapsfile);
2978 return 0;
2979}
2980
2981/* Records the thread's register state for the corefile note
2982 section. */
2983
2984static char *
2985linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2986 char *note_data, int *note_size)
2987{
2988 gdb_gregset_t gregs;
2989 gdb_fpregset_t fpregs;
2990#ifdef FILL_FPXREGSET
2991 gdb_fpxregset_t fpxregs;
2992#endif
2993 unsigned long lwp = ptid_get_lwp (ptid);
594f7785
UW
2994 struct regcache *regcache = get_thread_regcache (ptid);
2995 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4f844a66 2996 const struct regset *regset;
55e969c1 2997 int core_regset_p;
594f7785
UW
2998 struct cleanup *old_chain;
2999
3000 old_chain = save_inferior_ptid ();
3001 inferior_ptid = ptid;
3002 target_fetch_registers (regcache, -1);
3003 do_cleanups (old_chain);
4f844a66
DM
3004
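  /* Prefer the architecture's regset-based collection when it
     provides one for the given core section; otherwise fall back to
     the native fill_*regset routines.  */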
3005 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
55e969c1
DM
3006 if (core_regset_p
3007 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
3008 sizeof (gregs))) != NULL
3009 && regset->collect_regset != NULL)
594f7785 3010 regset->collect_regset (regset, regcache, -1,
55e969c1 3011 &gregs, sizeof (gregs));
4f844a66 3012 else
594f7785 3013 fill_gregset (regcache, &gregs, -1);
4f844a66 3014
55e969c1
DM
3015 note_data = (char *) elfcore_write_prstatus (obfd,
3016 note_data,
3017 note_size,
3018 lwp,
3019 stop_signal, &gregs);
3020
3021 if (core_regset_p
3022 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
3023 sizeof (fpregs))) != NULL
3024 && regset->collect_regset != NULL)
594f7785 3025 regset->collect_regset (regset, regcache, -1,
55e969c1 3026 &fpregs, sizeof (fpregs));
4f844a66 3027 else
594f7785 3028 fill_fpregset (regcache, &fpregs, -1);
4f844a66 3029
55e969c1
DM
3030 note_data = (char *) elfcore_write_prfpreg (obfd,
3031 note_data,
3032 note_size,
3033 &fpregs, sizeof (fpregs));
dba24537 3034
dba24537 3035#ifdef FILL_FPXREGSET
55e969c1
DM
3036 if (core_regset_p
3037 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg-xfp",
3038 sizeof (fpxregs))) != NULL
3039 && regset->collect_regset != NULL)
594f7785 3040 regset->collect_regset (regset, regcache, -1,
55e969c1 3041 &fpxregs, sizeof (fpxregs));
4f844a66 3042 else
594f7785 3043 fill_fpxregset (regcache, &fpxregs, -1);
4f844a66 3044
55e969c1
DM
3045 note_data = (char *) elfcore_write_prxfpreg (obfd,
3046 note_data,
3047 note_size,
3048 &fpxregs, sizeof (fpxregs));
dba24537
AC
3049#endif
3050 return note_data;
3051}
3052
3053struct linux_nat_corefile_thread_data
3054{
3055 bfd *obfd;
3056 char *note_data;
3057 int *note_size;
3058 int num_notes;
3059};
3060
3061/* Called by gdbthread.c once per thread. Records the thread's
3062 register state for the corefile note section. */
3063
3064static int
3065linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
3066{
3067 struct linux_nat_corefile_thread_data *args = data;
dba24537 3068
dba24537
AC
3069 args->note_data = linux_nat_do_thread_registers (args->obfd,
3070 ti->ptid,
3071 args->note_data,
3072 args->note_size);
3073 args->num_notes++;
56be3814 3074
dba24537
AC
3075 return 0;
3076}
3077
3078/* Records the register state for the corefile note section. */
3079
3080static char *
3081linux_nat_do_registers (bfd *obfd, ptid_t ptid,
3082 char *note_data, int *note_size)
3083{
dba24537
AC
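  /* No per-thread information is available here, so record the
     registers under a ptid whose LWP id equals the process id,
     i.e. the main thread.  */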
3084 return linux_nat_do_thread_registers (obfd,
3085 ptid_build (ptid_get_pid (inferior_ptid),
3086 ptid_get_pid (inferior_ptid),
3087 0),
3088 note_data, note_size);
dba24537
AC
3089}
3090
3091/* Fills the "to_make_corefile_note" target vector. Builds the note
3092 section for a corefile, and returns it in a malloc buffer. */
3093
3094static char *
3095linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
3096{
3097 struct linux_nat_corefile_thread_data thread_args;
3098 struct cleanup *old_chain;
d99148ef 3099 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
dba24537 3100 char fname[16] = { '\0' };
d99148ef 3101 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
dba24537
AC
3102 char psargs[80] = { '\0' };
3103 char *note_data = NULL;
3104 ptid_t current_ptid = inferior_ptid;
c6826062 3105 gdb_byte *auxv;
dba24537
AC
3106 int auxv_len;
3107
3108 if (get_exec_file (0))
3109 {
3110 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
3111 strncpy (psargs, get_exec_file (0), sizeof (psargs));
3112 if (get_inferior_args ())
3113 {
d99148ef
JK
3114 char *string_end;
3115 char *psargs_end = psargs + sizeof (psargs);
3116
3117 /* linux_elfcore_write_prpsinfo () handles zero unterminated
3118 strings fine. */
3119 string_end = memchr (psargs, 0, sizeof (psargs));
3120 if (string_end != NULL)
3121 {
3122 *string_end++ = ' ';
3123 strncpy (string_end, get_inferior_args (),
3124 psargs_end - string_end);
3125 }
dba24537
AC
3126 }
3127 note_data = (char *) elfcore_write_prpsinfo (obfd,
3128 note_data,
3129 note_size, fname, psargs);
3130 }
3131
3132 /* Dump information for threads. */
3133 thread_args.obfd = obfd;
3134 thread_args.note_data = note_data;
3135 thread_args.note_size = note_size;
3136 thread_args.num_notes = 0;
3137 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
3138 if (thread_args.num_notes == 0)
3139 {
3140 /* iterate_over_threads didn't come up with any threads; just
3141 use inferior_ptid. */
3142 note_data = linux_nat_do_registers (obfd, inferior_ptid,
3143 note_data, note_size);
3144 }
3145 else
3146 {
3147 note_data = thread_args.note_data;
3148 }
3149
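  /* Append the inferior's auxiliary vector as an NT_AUXV note, so the
     core file records the process's ELF auxv.  */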
13547ab6
DJ
3150 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
3151 NULL, &auxv);
dba24537
AC
3152 if (auxv_len > 0)
3153 {
3154 note_data = elfcore_write_note (obfd, note_data, note_size,
3155 "CORE", NT_AUXV, auxv, auxv_len);
3156 xfree (auxv);
3157 }
3158
3159 make_cleanup (xfree, note_data);
3160 return note_data;
3161}
3162
3163/* Implement the "info proc" command. */
3164
3165static void
3166linux_nat_info_proc_cmd (char *args, int from_tty)
3167{
3168 long long pid = PIDGET (inferior_ptid);
3169 FILE *procfile;
3170 char **argv = NULL;
3171 char buffer[MAXPATHLEN];
3172 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
3173 int cmdline_f = 1;
3174 int cwd_f = 1;
3175 int exe_f = 1;
3176 int mappings_f = 0;
3177 int environ_f = 0;
3178 int status_f = 0;
3179 int stat_f = 0;
3180 int all = 0;
3181 struct stat dummy;
3182
3183 if (args)
3184 {
3185 /* Break up 'args' into an argv array. */
3186 if ((argv = buildargv (args)) == NULL)
3187 nomem (0);
3188 else
3189 make_cleanup_freeargv (argv);
3190 }
3191 while (argv != NULL && *argv != NULL)
3192 {
3193 if (isdigit (argv[0][0]))
3194 {
3195 pid = strtoul (argv[0], NULL, 10);
3196 }
3197 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
3198 {
3199 mappings_f = 1;
3200 }
3201 else if (strcmp (argv[0], "status") == 0)
3202 {
3203 status_f = 1;
3204 }
3205 else if (strcmp (argv[0], "stat") == 0)
3206 {
3207 stat_f = 1;
3208 }
3209 else if (strcmp (argv[0], "cmd") == 0)
3210 {
3211 cmdline_f = 1;
3212 }
3213 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
3214 {
3215 exe_f = 1;
3216 }
3217 else if (strcmp (argv[0], "cwd") == 0)
3218 {
3219 cwd_f = 1;
3220 }
3221 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
3222 {
3223 all = 1;
3224 }
3225 else
3226 {
3227 /* [...] (future options here) */
3228 }
3229 argv++;
3230 }
3231 if (pid == 0)
8a3fe4f8 3232 error (_("No current process: you must name one."));
dba24537
AC
3233
3234 sprintf (fname1, "/proc/%lld", pid);
3235 if (stat (fname1, &dummy) != 0)
8a3fe4f8 3236 error (_("No /proc directory: '%s'"), fname1);
dba24537 3237
a3f17187 3238 printf_filtered (_("process %lld\n"), pid);
dba24537
AC
3239 if (cmdline_f || all)
3240 {
3241 sprintf (fname1, "/proc/%lld/cmdline", pid);
d5d6fca5 3242 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3243 {
3244 fgets (buffer, sizeof (buffer), procfile);
3245 printf_filtered ("cmdline = '%s'\n", buffer);
3246 fclose (procfile);
3247 }
3248 else
8a3fe4f8 3249 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3250 }
3251 if (cwd_f || all)
3252 {
3253 sprintf (fname1, "/proc/%lld/cwd", pid);
3254 memset (fname2, 0, sizeof (fname2));
3255 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3256 printf_filtered ("cwd = '%s'\n", fname2);
3257 else
8a3fe4f8 3258 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
3259 }
3260 if (exe_f || all)
3261 {
3262 sprintf (fname1, "/proc/%lld/exe", pid);
3263 memset (fname2, 0, sizeof (fname2));
3264 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3265 printf_filtered ("exe = '%s'\n", fname2);
3266 else
8a3fe4f8 3267 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
3268 }
3269 if (mappings_f || all)
3270 {
3271 sprintf (fname1, "/proc/%lld/maps", pid);
d5d6fca5 3272 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3273 {
3274 long long addr, endaddr, size, offset, inode;
3275 char permissions[8], device[8], filename[MAXPATHLEN];
3276
a3f17187 3277 printf_filtered (_("Mapped address spaces:\n\n"));
17a912b6 3278 if (gdbarch_addr_bit (current_gdbarch) == 32)
dba24537
AC
3279 {
3280 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
3281 "Start Addr",
3282 " End Addr",
3283 " Size", " Offset", "objfile");
3284 }
3285 else
3286 {
3287 printf_filtered (" %18s %18s %10s %10s %7s\n",
3288 "Start Addr",
3289 " End Addr",
3290 " Size", " Offset", "objfile");
3291 }
3292
3293 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
3294 &offset, &device[0], &inode, &filename[0]))
3295 {
3296 size = endaddr - addr;
3297
3298 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
3299 calls here (and possibly above) should be abstracted
3300 out into their own functions? Andrew suggests using
3301 a generic local_address_string instead to print out
3302 the addresses; that makes sense to me, too. */
3303
17a912b6 3304 if (gdbarch_addr_bit (current_gdbarch) == 32)
dba24537
AC
3305 {
3306 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
3307 (unsigned long) addr, /* FIXME: pr_addr */
3308 (unsigned long) endaddr,
3309 (int) size,
3310 (unsigned int) offset,
3311 filename[0] ? filename : "");
3312 }
3313 else
3314 {
3315 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
3316 (unsigned long) addr, /* FIXME: pr_addr */
3317 (unsigned long) endaddr,
3318 (int) size,
3319 (unsigned int) offset,
3320 filename[0] ? filename : "");
3321 }
3322 }
3323
3324 fclose (procfile);
3325 }
3326 else
8a3fe4f8 3327 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3328 }
3329 if (status_f || all)
3330 {
3331 sprintf (fname1, "/proc/%lld/status", pid);
d5d6fca5 3332 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3333 {
3334 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
3335 puts_filtered (buffer);
3336 fclose (procfile);
3337 }
3338 else
8a3fe4f8 3339 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3340 }
3341 if (stat_f || all)
3342 {
3343 sprintf (fname1, "/proc/%lld/stat", pid);
d5d6fca5 3344 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3345 {
3346 int itmp;
3347 char ctmp;
a25694b4 3348 long ltmp;
dba24537
AC
3349
3350 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3351 printf_filtered (_("Process: %d\n"), itmp);
a25694b4 3352 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
a3f17187 3353 printf_filtered (_("Exec file: %s\n"), buffer);
dba24537 3354 if (fscanf (procfile, "%c ", &ctmp) > 0)
a3f17187 3355 printf_filtered (_("State: %c\n"), ctmp);
dba24537 3356 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3357 printf_filtered (_("Parent process: %d\n"), itmp);
dba24537 3358 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3359 printf_filtered (_("Process group: %d\n"), itmp);
dba24537 3360 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3361 printf_filtered (_("Session id: %d\n"), itmp);
dba24537 3362 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3363 printf_filtered (_("TTY: %d\n"), itmp);
dba24537 3364 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3365 printf_filtered (_("TTY owner process group: %d\n"), itmp);
a25694b4
AS
3366 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3367 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
3368 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3369 printf_filtered (_("Minor faults (no memory page): %lu\n"),
3370 (unsigned long) ltmp);
3371 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3372 printf_filtered (_("Minor faults, children: %lu\n"),
3373 (unsigned long) ltmp);
3374 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3375 printf_filtered (_("Major faults (memory page faults): %lu\n"),
3376 (unsigned long) ltmp);
3377 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3378 printf_filtered (_("Major faults, children: %lu\n"),
3379 (unsigned long) ltmp);
3380 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3381 printf_filtered (_("utime: %ld\n"), ltmp);
3382 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3383 printf_filtered (_("stime: %ld\n"), ltmp);
3384 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3385 printf_filtered (_("utime, children: %ld\n"), ltmp);
3386 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3387 printf_filtered (_("stime, children: %ld\n"), ltmp);
3388 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3389 printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
3390 ltmp);
3391 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3392 printf_filtered (_("'nice' value: %ld\n"), ltmp);
3393 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3394 printf_filtered (_("jiffies until next timeout: %lu\n"),
3395 (unsigned long) ltmp);
3396 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3397 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
3398 (unsigned long) ltmp);
3399 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3400 printf_filtered (_("start time (jiffies since system boot): %ld\n"),
3401 ltmp);
3402 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3403 printf_filtered (_("Virtual memory size: %lu\n"),
3404 (unsigned long) ltmp);
3405 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3406 printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp);
3407 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3408 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
3409 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3410 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
3411 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3412 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
3413 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3414 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
dba24537
AC
3415#if 0 /* Don't know how architecture-dependent the rest is...
3416 Anyway the signal bitmap info is available from "status". */
a25694b4
AS
3417 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3418 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
3419 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3420 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
3421 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3422 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
3423 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3424 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
3425 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3426 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
3427 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3428 printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp);
3429 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3430 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
dba24537
AC
3431#endif
3432 fclose (procfile);
3433 }
3434 else
8a3fe4f8 3435 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3436 }
3437}
3438
10d6c8cd
DJ
3439/* Implement the to_xfer_partial interface for memory reads using the /proc
3440 filesystem. Because we can use a single read() call for /proc, this
3441 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3442 but it doesn't support writes. */
3443
3444static LONGEST
3445linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3446 const char *annex, gdb_byte *readbuf,
3447 const gdb_byte *writebuf,
3448 ULONGEST offset, LONGEST len)
dba24537 3449{
10d6c8cd
DJ
3450 LONGEST ret;
3451 int fd;
dba24537
AC
3452 char filename[64];
3453
10d6c8cd 3454 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
3455 return 0;
3456
3457 /* Don't bother for one word. */
3458 if (len < 3 * sizeof (long))
3459 return 0;
3460
3461 /* We could keep this file open and cache it - possibly one per
3462 thread. That requires some juggling, but is even faster. */
3463 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
3464 fd = open (filename, O_RDONLY | O_LARGEFILE);
3465 if (fd == -1)
3466 return 0;
3467
3468 /* If pread64 is available, use it. It's faster if the kernel
3469 supports it (only one syscall), and it's 64-bit safe even on
3470 32-bit platforms (for instance, SPARC debugging a SPARC64
3471 application). */
3472#ifdef HAVE_PREAD64
10d6c8cd 3473 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 3474#else
10d6c8cd 3475 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
3476#endif
3477 ret = 0;
3478 else
3479 ret = len;
3480
3481 close (fd);
3482 return ret;
3483}
3484
3485/* Parse LINE as a signal set and add its set bits to SIGS. */
3486
3487static void
3488add_line_to_sigset (const char *line, sigset_t *sigs)
3489{
3490 int len = strlen (line) - 1;
3491 const char *p;
3492 int signum;
3493
3494 if (line[len] != '\n')
8a3fe4f8 3495 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3496
3497 p = line;
3498 signum = len * 4;
3499 while (len-- > 0)
3500 {
3501 int digit;
3502
3503 if (*p >= '0' && *p <= '9')
3504 digit = *p - '0';
3505 else if (*p >= 'a' && *p <= 'f')
3506 digit = *p - 'a' + 10;
3507 else
8a3fe4f8 3508 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3509
3510 signum -= 4;
3511
3512 if (digit & 1)
3513 sigaddset (sigs, signum + 1);
3514 if (digit & 2)
3515 sigaddset (sigs, signum + 2);
3516 if (digit & 4)
3517 sigaddset (sigs, signum + 3);
3518 if (digit & 8)
3519 sigaddset (sigs, signum + 4);
3520
3521 p++;
3522 }
3523}
3524
3525/* Find process PID's pending, blocked, and ignored signals from
3526   /proc/pid/status and set PENDING, BLOCKED, and IGNORED to match. */
3527
3528void
3529linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
3530{
3531 FILE *procfile;
3532 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
3533 int signum;
3534
3535 sigemptyset (pending);
3536 sigemptyset (blocked);
3537 sigemptyset (ignored);
3538 sprintf (fname, "/proc/%d/status", pid);
3539 procfile = fopen (fname, "r");
3540 if (procfile == NULL)
8a3fe4f8 3541 error (_("Could not open %s"), fname);
dba24537
AC
3542
3543 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
3544 {
3545 /* Normal queued signals are on the SigPnd line in the status
3546 file. However, 2.6 kernels also have a "shared" pending
3547 queue for delivering signals to a thread group, so check for
3548 a ShdPnd line also.
3549
3550 Unfortunately some Red Hat kernels include the shared pending
3551 queue but not the ShdPnd status field. */
3552
3553 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
3554 add_line_to_sigset (buffer + 8, pending);
3555 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
3556 add_line_to_sigset (buffer + 8, pending);
3557 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
3558 add_line_to_sigset (buffer + 8, blocked);
3559 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
3560 add_line_to_sigset (buffer + 8, ignored);
3561 }
3562
3563 fclose (procfile);
3564}
3565
10d6c8cd
DJ
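/* Common xfer_partial for the GNU/Linux target: auxv requests go to
   procfs_xfer_auxv, memory reads are attempted via /proc/PID/mem
   first, and anything else falls through to the inherited ptrace
   method.  */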
3566static LONGEST
3567linux_xfer_partial (struct target_ops *ops, enum target_object object,
3568 const char *annex, gdb_byte *readbuf,
3569 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3570{
3571 LONGEST xfer;
3572
3573 if (object == TARGET_OBJECT_AUXV)
3574 return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
3575 offset, len);
3576
3577 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
3578 offset, len);
3579 if (xfer != 0)
3580 return xfer;
3581
3582 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
3583 offset, len);
3584}
3585
e9efe249 3586/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
3587 it with local methods. */
3588
910122bf
UW
3589static void
3590linux_target_install_ops (struct target_ops *t)
10d6c8cd 3591{
6d8fd2b7
UW
3592 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
3593 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
3594 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
3595 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 3596 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
3597 t->to_post_attach = linux_child_post_attach;
3598 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
3599 t->to_find_memory_regions = linux_nat_find_memory_regions;
3600 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
3601
3602 super_xfer_partial = t->to_xfer_partial;
3603 t->to_xfer_partial = linux_xfer_partial;
910122bf
UW
3604}
3605
3606struct target_ops *
3607linux_target (void)
3608{
3609 struct target_ops *t;
3610
3611 t = inf_ptrace_target ();
3612 linux_target_install_ops (t);
3613
3614 return t;
3615}
3616
3617struct target_ops *
7714d83a 3618linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
3619{
3620 struct target_ops *t;
3621
3622 t = inf_ptrace_trad_target (register_u_offset);
3623 linux_target_install_ops (t);
10d6c8cd 3624
10d6c8cd
DJ
3625 return t;
3626}
3627
b84876c2
PA
 3628/* Controls whether async mode is permitted.  */
3629static int linux_async_permitted = 0;
3630
 3631/* The set command writes to this variable.  If the inferior is
 3632   executing, linux_async_permitted is *not* updated.  */
3633static int linux_async_permitted_1 = 0;
3634
3635static void
3636set_maintenance_linux_async_permitted (char *args, int from_tty,
3637 struct cmd_list_element *c)
3638{
3639 if (target_has_execution)
3640 {
3641 linux_async_permitted_1 = linux_async_permitted;
3642 error (_("Cannot change this setting while the inferior is running."));
3643 }
3644
3645 linux_async_permitted = linux_async_permitted_1;
3646 linux_nat_set_async_mode (linux_async_permitted);
3647}
3648
3649static void
3650show_maintenance_linux_async_permitted (struct ui_file *file, int from_tty,
3651 struct cmd_list_element *c, const char *value)
3652{
3653 fprintf_filtered (file, _("\
3654Controlling the GNU/Linux inferior in asynchronous mode is %s.\n"),
3655 value);
3656}
3657
3658/* target_is_async_p implementation. */
3659
3660static int
3661linux_nat_is_async_p (void)
3662{
3663 /* NOTE: palves 2008-03-21: We're only async when the user requests
3664 it explicitly with the "maintenance set linux-async" command.
3665 Someday, linux will always be async. */
3666 if (!linux_async_permitted)
3667 return 0;
3668
3669 return 1;
3670}
3671
3672/* target_can_async_p implementation. */
3673
3674static int
3675linux_nat_can_async_p (void)
3676{
3677 /* NOTE: palves 2008-03-21: We're only async when the user requests
3678 it explicitly with the "maintenance set linux-async" command.
3679 Someday, linux will always be async. */
3680 if (!linux_async_permitted)
3681 return 0;
3682
3683 /* See target.h/target_async_mask. */
3684 return linux_nat_async_mask_value;
3685}
3686
3687/* target_async_mask implementation. */
3688
3689static int
3690linux_nat_async_mask (int mask)
3691{
3692 int current_state;
3693 current_state = linux_nat_async_mask_value;
3694
3695 if (current_state != mask)
3696 {
3697 if (mask == 0)
3698 {
3699 linux_nat_async (NULL, 0);
3700 linux_nat_async_mask_value = mask;
3701 /* We're in sync mode. Make sure SIGCHLD isn't handled by
3702 async_sigchld_handler when we come out of sigsuspend in
3703 linux_nat_wait. */
3704 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
3705 }
3706 else
3707 {
3708 /* Restore the async handler. */
3709 sigaction (SIGCHLD, &async_sigchld_action, NULL);
3710 linux_nat_async_mask_value = mask;
3711 linux_nat_async (inferior_event_handler, 0);
3712 }
3713 }
3714
3715 return current_state;
3716}
3717
3718/* Pop an event from the event pipe. */
3719
3720static int
 3721linux_nat_event_pipe_pop (int *ptr_status, int *ptr_options)
3722{
3723 struct waitpid_result event = {0};
3724 int ret;
3725
3726 do
3727 {
3728 ret = read (linux_nat_event_pipe[0], &event, sizeof (event));
3729 }
3730 while (ret == -1 && errno == EINTR);
3731
3732 gdb_assert (ret == sizeof (event));
3733
3734 *ptr_status = event.status;
3735 *ptr_options = event.options;
3736
3737 linux_nat_num_queued_events--;
3738
3739 return event.pid;
3740}
3741
3742/* Push an event into the event pipe. */
3743
3744static void
3745linux_nat_event_pipe_push (int pid, int status, int options)
3746{
3747 int ret;
3748 struct waitpid_result event = {0};
3749 event.pid = pid;
3750 event.status = status;
3751 event.options = options;
3752
3753 do
3754 {
3755 ret = write (linux_nat_event_pipe[1], &event, sizeof (event));
3756 gdb_assert ((ret == -1 && errno == EINTR) || ret == sizeof (event));
3757 } while (ret == -1 && errno == EINTR);
3758
3759 linux_nat_num_queued_events++;
3760}
3761
3762static void
3763get_pending_events (void)
3764{
3765 int status, options, pid;
3766
3767 if (!linux_nat_async_enabled || !linux_nat_async_events_enabled)
3768 internal_error (__FILE__, __LINE__,
3769 "get_pending_events called with async masked");
3770
3771 while (1)
3772 {
3773 status = 0;
3774 options = __WCLONE | WNOHANG;
3775
3776 do
3777 {
3778 pid = waitpid (-1, &status, options);
3779 }
3780 while (pid == -1 && errno == EINTR);
3781
3782 if (pid <= 0)
3783 {
3784 options = WNOHANG;
3785 do
3786 {
3787 pid = waitpid (-1, &status, options);
3788 }
3789 while (pid == -1 && errno == EINTR);
3790 }
3791
3792 if (pid <= 0)
3793 /* No more children reporting events. */
3794 break;
3795
3796 if (debug_linux_nat_async)
3797 fprintf_unfiltered (gdb_stdlog, "\
3798get_pending_events: pid(%d), status(%x), options (%x)\n",
3799 pid, status, options);
3800
3801 linux_nat_event_pipe_push (pid, status, options);
3802 }
3803
3804 if (debug_linux_nat_async)
3805 fprintf_unfiltered (gdb_stdlog, "\
3806get_pending_events: linux_nat_num_queued_events(%d)\n",
3807 linux_nat_num_queued_events);
3808}
3809
3810/* SIGCHLD handler for async mode. */
3811
3812static void
3813async_sigchld_handler (int signo)
3814{
3815 if (debug_linux_nat_async)
3816 fprintf_unfiltered (gdb_stdlog, "async_sigchld_handler\n");
3817
3818 get_pending_events ();
3819}
3820
3821/* Enable or disable async SIGCHLD handling. */
3822
3823static int
3824linux_nat_async_events (int enable)
3825{
3826 int current_state = linux_nat_async_events_enabled;
3827
3828 if (debug_linux_nat_async)
3829 fprintf_unfiltered (gdb_stdlog,
3830 "LNAE: enable(%d): linux_nat_async_events_enabled(%d), "
3831 "linux_nat_num_queued_events(%d)\n",
3832 enable, linux_nat_async_events_enabled,
3833 linux_nat_num_queued_events);
3834
3835 if (current_state != enable)
3836 {
3837 sigset_t mask;
3838 sigemptyset (&mask);
3839 sigaddset (&mask, SIGCHLD);
3840 if (enable)
3841 {
3842 /* Unblock target events. */
3843 linux_nat_async_events_enabled = 1;
3844
3845 local_event_queue_to_pipe ();
 3846	  /* While in masked async, we may not have collected all the
 3847	     pending events.  Get them out now.  */
3848 get_pending_events ();
3849 sigprocmask (SIG_UNBLOCK, &mask, NULL);
3850 }
3851 else
3852 {
3853 /* Block target events. */
3854 sigprocmask (SIG_BLOCK, &mask, NULL);
3855 linux_nat_async_events_enabled = 0;
 3856	  /* Get events out of the pipe, and make them available to
 3857	     queued_waitpid / my_waitpid.  */
3858 pipe_to_local_event_queue ();
3859 }
3860 }
3861
3862 return current_state;
3863}
3864
3865static int async_terminal_is_ours = 1;
3866
3867/* target_terminal_inferior implementation. */
3868
3869static void
3870linux_nat_terminal_inferior (void)
3871{
3872 if (!target_is_async_p ())
3873 {
3874 /* Async mode is disabled. */
3875 terminal_inferior ();
3876 return;
3877 }
3878
 3879  /* GDB should never give the terminal to the inferior if the
 3880     inferior is running in the background (run&, continue&, etc.).
 3881     This check can be removed when the common code is fixed.  */
3882 if (!sync_execution)
3883 return;
3884
3885 terminal_inferior ();
3886
3887 if (!async_terminal_is_ours)
3888 return;
3889
3890 delete_file_handler (input_fd);
3891 async_terminal_is_ours = 0;
3892 set_sigint_trap ();
3893}
3894
3895/* target_terminal_ours implementation. */
3896
3897void
3898linux_nat_terminal_ours (void)
3899{
3900 if (!target_is_async_p ())
3901 {
3902 /* Async mode is disabled. */
3903 terminal_ours ();
3904 return;
3905 }
3906
 3907  /* GDB should never give the terminal to the inferior if the
 3908     inferior is running in the background (run&, continue&, etc.),
 3909     but it should always be able to claim the terminal back.  */
3910 terminal_ours ();
3911
3912 if (!sync_execution)
3913 return;
3914
3915 if (async_terminal_is_ours)
3916 return;
3917
3918 clear_sigint_trap ();
3919 add_file_handler (input_fd, stdin_event_handler, 0);
3920 async_terminal_is_ours = 1;
3921}
3922
3923static void (*async_client_callback) (enum inferior_event_type event_type,
3924 void *context);
3925static void *async_client_context;
3926
3927static void
3928linux_nat_async_file_handler (int error, gdb_client_data client_data)
3929{
3930 async_client_callback (INF_REG_EVENT, async_client_context);
3931}
3932
3933/* target_async implementation. */
3934
3935static void
3936linux_nat_async (void (*callback) (enum inferior_event_type event_type,
3937 void *context), void *context)
3938{
3939 if (linux_nat_async_mask_value == 0 || !linux_nat_async_enabled)
3940 internal_error (__FILE__, __LINE__,
3941 "Calling target_async when async is masked");
3942
3943 if (callback != NULL)
3944 {
3945 async_client_callback = callback;
3946 async_client_context = context;
3947 add_file_handler (linux_nat_event_pipe[0],
3948 linux_nat_async_file_handler, NULL);
3949
3950 linux_nat_async_events (1);
3951 }
3952 else
3953 {
3954 async_client_callback = callback;
3955 async_client_context = context;
3956
3957 linux_nat_async_events (0);
3958 delete_file_handler (linux_nat_event_pipe[0]);
3959 }
3960 return;
3961}
3962
3963/* Enable/Disable async mode. */
3964
3965static void
3966linux_nat_set_async_mode (int on)
3967{
3968 if (linux_nat_async_enabled != on)
3969 {
3970 if (on)
3971 {
3972 gdb_assert (waitpid_queue == NULL);
3973 sigaction (SIGCHLD, &async_sigchld_action, NULL);
3974
3975 if (pipe (linux_nat_event_pipe) == -1)
3976 internal_error (__FILE__, __LINE__,
3977 "creating event pipe failed.");
3978
3979 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
3980 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
3981 }
3982 else
3983 {
3984 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
3985
3986 drain_queued_events (-1);
3987
3988 linux_nat_num_queued_events = 0;
3989 close (linux_nat_event_pipe[0]);
3990 close (linux_nat_event_pipe[1]);
3991 linux_nat_event_pipe[0] = linux_nat_event_pipe[1] = -1;
3992
3993 }
3994 }
3995 linux_nat_async_enabled = on;
3996}
3997
f973ed9c
DJ
3998void
3999linux_nat_add_target (struct target_ops *t)
4000{
f973ed9c
DJ
4001 /* Save the provided single-threaded target. We save this in a separate
4002 variable because another target we've inherited from (e.g. inf-ptrace)
4003 may have saved a pointer to T; we want to use it for the final
4004 process stratum target. */
4005 linux_ops_saved = *t;
4006 linux_ops = &linux_ops_saved;
4007
4008 /* Override some methods for multithreading. */
b84876c2 4009 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
4010 t->to_attach = linux_nat_attach;
4011 t->to_detach = linux_nat_detach;
4012 t->to_resume = linux_nat_resume;
4013 t->to_wait = linux_nat_wait;
4014 t->to_xfer_partial = linux_nat_xfer_partial;
4015 t->to_kill = linux_nat_kill;
4016 t->to_mourn_inferior = linux_nat_mourn_inferior;
4017 t->to_thread_alive = linux_nat_thread_alive;
4018 t->to_pid_to_str = linux_nat_pid_to_str;
4019 t->to_has_thread_control = tc_schedlock;
4020
b84876c2
PA
4021 t->to_can_async_p = linux_nat_can_async_p;
4022 t->to_is_async_p = linux_nat_is_async_p;
4023 t->to_async = linux_nat_async;
4024 t->to_async_mask = linux_nat_async_mask;
4025 t->to_terminal_inferior = linux_nat_terminal_inferior;
4026 t->to_terminal_ours = linux_nat_terminal_ours;
4027
f973ed9c
DJ
4028 /* We don't change the stratum; this target will sit at
4029 process_stratum and thread_db will set at thread_stratum. This
4030 is a little strange, since this is a multi-threaded-capable
4031 target, but we want to be on the stack below thread_db, and we
4032 also want to be used for single-threaded processes. */
4033
4034 add_target (t);
4035
4036 /* TODO: Eliminate this and have libthread_db use
4037 find_target_beneath. */
4038 thread_db_init (t);
4039}
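
/* Illustrative sketch, not part of the original file: the typical
   shape of an architecture-specific *-linux-nat.c initializer that
   builds on linux_target and linux_nat_add_target above.  The function
   name is a hypothetical placeholder.  */
#if 0
void
_initialize_example_linux_nat (void)
{
  struct target_ops *t;

  /* Start from the generic ptrace-based GNU/Linux target.  */
  t = linux_target ();

  /* An architecture file would install its register fetch/store and
     watchpoint methods on T here.  */

  /* Register the target; linux-nat overrides the thread-related
     methods and passes the rest through.  */
  linux_nat_add_target (t);
}
#endif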
4040
9f0bdab8
DJ
4041/* Register a method to call whenever a new thread is attached. */
4042void
4043linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
4044{
4045 /* Save the pointer. We only support a single registered instance
4046 of the GNU/Linux native target, so we do not need to map this to
4047 T. */
4048 linux_nat_new_thread = new_thread;
4049}
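
/* Illustrative sketch, not part of the original file: registering a
   per-thread hook with linux_nat_set_new_thread above.  The callback
   and installer names are hypothetical; a real client might, for
   example, copy debug registers into each newly attached LWP.  */
#if 0
static void
example_new_thread_hook (ptid_t ptid)
{
  /* Called once for every LWP as it is attached.  */
}

static void
example_install_new_thread_hook (struct target_ops *t)
{
  linux_nat_set_new_thread (t, example_new_thread_hook);
}
#endif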
4050
4051/* Return the saved siginfo associated with PTID. */
4052struct siginfo *
4053linux_nat_get_siginfo (ptid_t ptid)
4054{
4055 struct lwp_info *lp = find_lwp_pid (ptid);
4056
4057 gdb_assert (lp != NULL);
4058
4059 return &lp->siginfo;
4060}
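
/* Illustrative sketch, not part of the original file: inspecting the
   siginfo saved for the current LWP via linux_nat_get_siginfo above,
   e.g. to distinguish a SIGTRAP stop from other stops.  */
#if 0
static int
example_stopped_by_sigtrap (void)
{
  struct siginfo *si = linux_nat_get_siginfo (inferior_ptid);

  return si->si_signo == SIGTRAP;
}
#endif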
4061
d6b0e80f
AC
4062void
4063_initialize_linux_nat (void)
4064{
b84876c2 4065 sigset_t mask;
dba24537 4066
1bedd215
AC
4067 add_info ("proc", linux_nat_info_proc_cmd, _("\
4068Show /proc process information about any running process.\n\
dba24537
AC
4069Specify any process id, or use the program being debugged by default.\n\
4070Specify any of the following keywords for detailed info:\n\
4071 mappings -- list of mapped memory regions.\n\
 4072  stat     -- list process statistics from /proc/PID/stat.\n\
 4073  status   -- list process status information from /proc/PID/status.\n\
1bedd215 4074 all -- list all available /proc info."));
d6b0e80f 4075
b84876c2
PA
4076 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
4077 &debug_linux_nat, _("\
4078Set debugging of GNU/Linux lwp module."), _("\
4079Show debugging of GNU/Linux lwp module."), _("\
4080Enables printf debugging output."),
4081 NULL,
4082 show_debug_linux_nat,
4083 &setdebuglist, &showdebuglist);
4084
4085 add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance,
4086 &debug_linux_nat_async, _("\
4087Set debugging of GNU/Linux async lwp module."), _("\
4088Show debugging of GNU/Linux async lwp module."), _("\
4089Enables printf debugging output."),
4090 NULL,
4091 show_debug_linux_nat_async,
4092 &setdebuglist, &showdebuglist);
4093
4094 add_setshow_boolean_cmd ("linux-async", class_maintenance,
4095 &linux_async_permitted_1, _("\
4096Set whether gdb controls the GNU/Linux inferior in asynchronous mode."), _("\
4097Show whether gdb controls the GNU/Linux inferior in asynchronous mode."), _("\
4098Tells gdb whether to control the GNU/Linux inferior in asynchronous mode."),
4099 set_maintenance_linux_async_permitted,
4100 show_maintenance_linux_async_permitted,
4101 &maintenance_set_cmdlist,
4102 &maintenance_show_cmdlist);
4103
 4104  /* Block SIGCHLD by default.  Doing this early prevents it from getting
 4105     unblocked if an exception is thrown due to an error while the
 4106     inferior is starting (sigsetjmp/siglongjmp).  */
4107 sigemptyset (&mask);
4108 sigaddset (&mask, SIGCHLD);
4109 sigprocmask (SIG_BLOCK, &mask, NULL);
4110
4111 /* Save this mask as the default. */
d6b0e80f
AC
4112 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4113
b84876c2
PA
4114 /* The synchronous SIGCHLD handler. */
4115 sync_sigchld_action.sa_handler = sigchld_handler;
4116 sigemptyset (&sync_sigchld_action.sa_mask);
4117 sync_sigchld_action.sa_flags = SA_RESTART;
4118
4119 /* Make it the default. */
4120 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
d6b0e80f
AC
4121
4122 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4123 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
4124 sigdelset (&suspend_mask, SIGCHLD);
4125
b84876c2
PA
4126 /* SIGCHLD handler for async mode. */
4127 async_sigchld_action.sa_handler = async_sigchld_handler;
4128 sigemptyset (&async_sigchld_action.sa_mask);
4129 async_sigchld_action.sa_flags = SA_RESTART;
d6b0e80f 4130
b84876c2
PA
4131 /* Install the default mode. */
4132 linux_nat_set_async_mode (linux_async_permitted);
d6b0e80f
AC
4133}
4134\f
4135
4136/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4137 the GNU/Linux Threads library and therefore doesn't really belong
4138 here. */
4139
4140/* Read variable NAME in the target and return its value if found.
4141 Otherwise return zero. It is assumed that the type of the variable
4142 is `int'. */
4143
4144static int
4145get_signo (const char *name)
4146{
4147 struct minimal_symbol *ms;
4148 int signo;
4149
4150 ms = lookup_minimal_symbol (name, NULL, NULL);
4151 if (ms == NULL)
4152 return 0;
4153
8e70166d 4154 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
4155 sizeof (signo)) != 0)
4156 return 0;
4157
4158 return signo;
4159}
4160
4161/* Return the set of signals used by the threads library in *SET. */
4162
4163void
4164lin_thread_get_thread_signals (sigset_t *set)
4165{
4166 struct sigaction action;
4167 int restart, cancel;
b84876c2 4168 sigset_t blocked_mask;
d6b0e80f 4169
b84876c2 4170 sigemptyset (&blocked_mask);
d6b0e80f
AC
4171 sigemptyset (set);
4172
4173 restart = get_signo ("__pthread_sig_restart");
17fbb0bd
DJ
4174 cancel = get_signo ("__pthread_sig_cancel");
4175
4176 /* LinuxThreads normally uses the first two RT signals, but in some legacy
4177 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
4178 not provide any way for the debugger to query the signal numbers -
4179 fortunately they don't change! */
4180
d6b0e80f 4181 if (restart == 0)
17fbb0bd 4182 restart = __SIGRTMIN;
d6b0e80f 4183
d6b0e80f 4184 if (cancel == 0)
17fbb0bd 4185 cancel = __SIGRTMIN + 1;
d6b0e80f
AC
4186
4187 sigaddset (set, restart);
4188 sigaddset (set, cancel);
4189
4190 /* The GNU/Linux Threads library makes terminating threads send a
4191 special "cancel" signal instead of SIGCHLD. Make sure we catch
4192 those (to prevent them from terminating GDB itself, which is
4193 likely to be their default action) and treat them the same way as
4194 SIGCHLD. */
4195
4196 action.sa_handler = sigchld_handler;
4197 sigemptyset (&action.sa_mask);
58aecb61 4198 action.sa_flags = SA_RESTART;
d6b0e80f
AC
4199 sigaction (cancel, &action, NULL);
4200
4201 /* We block the "cancel" signal throughout this code ... */
4202 sigaddset (&blocked_mask, cancel);
4203 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
4204
4205 /* ... except during a sigsuspend. */
4206 sigdelset (&suspend_mask, cancel);
4207}
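
/* Illustrative sketch, not part of the original file: a thread-layer
   client using lin_thread_get_thread_signals above to test whether a
   signal belongs to the threads library.  */
#if 0
static int
example_is_thread_library_signal (int signo)
{
  static sigset_t thread_sigs;
  static int initialized;

  if (!initialized)
    {
      lin_thread_get_thread_signals (&thread_sigs);
      initialized = 1;
    }

  return sigismember (&thread_sigs, signo);
}
#endif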
ac264b3b 4208