]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/linux-nat.c
Switch the license of all .c files to GPLv3.
[thirdparty/binutils-gdb.git] / gdb / linux-nat.c
1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "inferior.h"
23 #include "target.h"
24 #include "gdb_string.h"
25 #include "gdb_wait.h"
26 #include "gdb_assert.h"
27 #ifdef HAVE_TKILL_SYSCALL
28 #include <unistd.h>
29 #include <sys/syscall.h>
30 #endif
31 #include <sys/ptrace.h>
32 #include "linux-nat.h"
33 #include "linux-fork.h"
34 #include "gdbthread.h"
35 #include "gdbcmd.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "inf-ptrace.h"
39 #include "auxv.h"
40 #include <sys/param.h> /* for MAXPATHLEN */
41 #include <sys/procfs.h> /* for elf_gregset etc. */
42 #include "elf-bfd.h" /* for elfcore_write_* */
43 #include "gregset.h" /* for gregset */
44 #include "gdbcore.h" /* for get_exec_file */
45 #include <ctype.h> /* for isdigit */
46 #include "gdbthread.h" /* for struct thread_info etc. */
47 #include "gdb_stat.h" /* for struct stat */
48 #include <fcntl.h> /* for O_RDONLY */
49
50 #ifndef O_LARGEFILE
51 #define O_LARGEFILE 0
52 #endif
53
54 /* If the system headers did not provide the constants, hard-code the normal
55 values. */
56 #ifndef PTRACE_EVENT_FORK
57
58 #define PTRACE_SETOPTIONS 0x4200
59 #define PTRACE_GETEVENTMSG 0x4201
60
61 /* options set using PTRACE_SETOPTIONS */
62 #define PTRACE_O_TRACESYSGOOD 0x00000001
63 #define PTRACE_O_TRACEFORK 0x00000002
64 #define PTRACE_O_TRACEVFORK 0x00000004
65 #define PTRACE_O_TRACECLONE 0x00000008
66 #define PTRACE_O_TRACEEXEC 0x00000010
67 #define PTRACE_O_TRACEVFORKDONE 0x00000020
68 #define PTRACE_O_TRACEEXIT 0x00000040
69
70 /* Wait extended result codes for the above trace options. */
71 #define PTRACE_EVENT_FORK 1
72 #define PTRACE_EVENT_VFORK 2
73 #define PTRACE_EVENT_CLONE 3
74 #define PTRACE_EVENT_EXEC 4
75 #define PTRACE_EVENT_VFORK_DONE 5
76 #define PTRACE_EVENT_EXIT 6
77
78 #endif /* PTRACE_EVENT_FORK */
79
80 /* We can't always assume that this flag is available, but all systems
81 with the ptrace event handlers also have __WALL, so it's safe to use
82 here. */
83 #ifndef __WALL
84 #define __WALL 0x40000000 /* Wait for any child. */
85 #endif
86
87 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
88 the use of the multi-threaded target. */
89 static struct target_ops *linux_ops;
90 static struct target_ops linux_ops_saved;
91
92 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
93 Called by our to_xfer_partial. */
94 static LONGEST (*super_xfer_partial) (struct target_ops *,
95 enum target_object,
96 const char *, gdb_byte *,
97 const gdb_byte *,
98 ULONGEST, LONGEST);
99
100 static int debug_linux_nat;
/* Implement the "show debug lin-lwp" command: print the current
   setting of the linux-nat debug flag to FILE.  VALUE is the
   already-formatted string representation of the setting.  */
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}
108
109 static int linux_parent_pid;
110
111 struct simple_pid_list
112 {
113 int pid;
114 int status;
115 struct simple_pid_list *next;
116 };
117 struct simple_pid_list *stopped_pids;
118
119 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
120 can not be used, 1 if it can. */
121
122 static int linux_supports_tracefork_flag = -1;
123
124 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
125 PTRACE_O_TRACEVFORKDONE. */
126
127 static int linux_supports_tracevforkdone_flag = -1;
128
129 \f
130 /* Trivial list manipulation functions to keep track of a list of
131 new stopped processes. */
132 static void
133 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
134 {
135 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
136 new_pid->pid = pid;
137 new_pid->status = status;
138 new_pid->next = *listp;
139 *listp = new_pid;
140 }
141
142 static int
143 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status)
144 {
145 struct simple_pid_list **p;
146
147 for (p = listp; *p != NULL; p = &(*p)->next)
148 if ((*p)->pid == pid)
149 {
150 struct simple_pid_list *next = (*p)->next;
151 *status = (*p)->status;
152 xfree (*p);
153 *p = next;
154 return 1;
155 }
156 return 0;
157 }
158
/* Remember that PID stopped with wait status STATUS, so the event can
   be claimed later via pull_pid_from_list on `stopped_pids'.  */
static void
linux_record_stopped_pid (int pid, int status)
{
  add_to_pid_list (&stopped_pids, pid, status);
}
164
165 \f
/* A helper function for linux_test_for_tracefork, called after fork ().
   Runs in the child: request tracing by the parent, stop so the parent
   can set ptrace options, fork once (so the parent can observe the
   fork event, if supported), then exit.  Never returns.  */

static void
linux_tracefork_child (void)
{
  /* The original declared an unused `int ret' here; removed.  The
     return values are deliberately ignored — there is nothing useful
     to do on failure in the short-lived test child.  */
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}
178
/* Wrapper function for waitpid which handles EINTR: retry the wait
   for PID (with FLAGS, storing the status in *STATUS) as long as it
   is interrupted by a signal.  Returns what waitpid returns.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int result;

  do
    result = waitpid (pid, status, flags);
  while (result == -1 && errno == EINTR);

  return result;
}
193
194 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
195
196 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
197 we know that the feature is not available. This may change the tracing
198 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
199
200 However, if it succeeds, we don't know for sure that the feature is
201 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
202 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
203 fork tracing, and let it fork. If the process exits, we assume that we
204 can't use TRACEFORK; if we get the fork notification, and we can extract
205 the new child's PID, then we assume that we can. */
206
static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;

  /* Assume no support until proven otherwise.  */
  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  /* If the kernel rejects the option outright, it is too old.  */
  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    return;

  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  /* The child stops itself with SIGSTOP (see linux_tracefork_child);
     wait for that stop before setting its options.  */
  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      /* Setting the option failed for the child too; clean the child
	 up and report no support.  */
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning (_("linux_test_for_tracefork: failed to kill child"));
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning (_("linux_test_for_tracefork: failed to wait for killed child"));
      else if (!WIFSIGNALED (status))
	warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
		 "killed child"), status);

      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
		PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  /* Let the child run to its fork () call.  */
  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  /* A PTRACE_EVENT_FORK is reported in the high bits of the wait
     status; old kernels that ignore unknown options will instead let
     the child exit.  */
  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  linux_supports_tracefork_flag = 1;
	  /* Reap and kill the grandchild as well.  */
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning (_("linux_test_for_tracefork: failed to kill second child"));
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
	     "(%d, status 0x%x)"), ret, status);

  /* Clean up the test child.  */
  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);
}
292
293 /* Return non-zero iff we have tracefork functionality available.
294 This function also sets linux_supports_tracefork_flag. */
295
296 static int
297 linux_supports_tracefork (int pid)
298 {
299 if (linux_supports_tracefork_flag == -1)
300 linux_test_for_tracefork (pid);
301 return linux_supports_tracefork_flag;
302 }
303
304 static int
305 linux_supports_tracevforkdone (int pid)
306 {
307 if (linux_supports_tracefork_flag == -1)
308 linux_test_for_tracefork (pid);
309 return linux_supports_tracevforkdone_flag;
310 }
311
312 \f
313 void
314 linux_enable_event_reporting (ptid_t ptid)
315 {
316 int pid = ptid_get_lwp (ptid);
317 int options;
318
319 if (pid == 0)
320 pid = ptid_get_pid (ptid);
321
322 if (! linux_supports_tracefork (pid))
323 return;
324
325 options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
326 | PTRACE_O_TRACECLONE;
327 if (linux_supports_tracevforkdone (pid))
328 options |= PTRACE_O_TRACEVFORKDONE;
329
330 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
331 read-only process state. */
332
333 ptrace (PTRACE_SETOPTIONS, pid, 0, options);
334 }
335
/* Target hook run after attaching to PID: enable kernel event
   reporting for it, then check whether libthread_db should take over
   thread handling.  */
static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  check_for_thread_db ();
}
342
/* Target hook run after starting a new inferior PTID: enable kernel
   event reporting for it, then check whether libthread_db should
   take over thread handling.  */
static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  check_for_thread_db ();
}
349
350 static int
351 linux_child_follow_fork (struct target_ops *ops, int follow_child)
352 {
353 ptid_t last_ptid;
354 struct target_waitstatus last_status;
355 int has_vforked;
356 int parent_pid, child_pid;
357
358 get_last_target_status (&last_ptid, &last_status);
359 has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
360 parent_pid = ptid_get_lwp (last_ptid);
361 if (parent_pid == 0)
362 parent_pid = ptid_get_pid (last_ptid);
363 child_pid = last_status.value.related_pid;
364
365 if (! follow_child)
366 {
367 /* We're already attached to the parent, by default. */
368
369 /* Before detaching from the child, remove all breakpoints from
370 it. (This won't actually modify the breakpoint list, but will
371 physically remove the breakpoints from the child.) */
372 /* If we vforked this will remove the breakpoints from the parent
373 also, but they'll be reinserted below. */
374 detach_breakpoints (child_pid);
375
376 /* Detach new forked process? */
377 if (detach_fork)
378 {
379 if (debug_linux_nat)
380 {
381 target_terminal_ours ();
382 fprintf_filtered (gdb_stdlog,
383 "Detaching after fork from child process %d.\n",
384 child_pid);
385 }
386
387 ptrace (PTRACE_DETACH, child_pid, 0, 0);
388 }
389 else
390 {
391 struct fork_info *fp;
392 /* Retain child fork in ptrace (stopped) state. */
393 fp = find_fork_pid (child_pid);
394 if (!fp)
395 fp = add_fork (child_pid);
396 fork_save_infrun_state (fp, 0);
397 }
398
399 if (has_vforked)
400 {
401 gdb_assert (linux_supports_tracefork_flag >= 0);
402 if (linux_supports_tracevforkdone (0))
403 {
404 int status;
405
406 ptrace (PTRACE_CONT, parent_pid, 0, 0);
407 my_waitpid (parent_pid, &status, __WALL);
408 if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
409 warning (_("Unexpected waitpid result %06x when waiting for "
410 "vfork-done"), status);
411 }
412 else
413 {
414 /* We can't insert breakpoints until the child has
415 finished with the shared memory region. We need to
416 wait until that happens. Ideal would be to just
417 call:
418 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
419 - waitpid (parent_pid, &status, __WALL);
420 However, most architectures can't handle a syscall
421 being traced on the way out if it wasn't traced on
422 the way in.
423
424 We might also think to loop, continuing the child
425 until it exits or gets a SIGTRAP. One problem is
426 that the child might call ptrace with PTRACE_TRACEME.
427
428 There's no simple and reliable way to figure out when
429 the vforked child will be done with its copy of the
430 shared memory. We could step it out of the syscall,
431 two instructions, let it go, and then single-step the
432 parent once. When we have hardware single-step, this
433 would work; with software single-step it could still
434 be made to work but we'd have to be able to insert
435 single-step breakpoints in the child, and we'd have
436 to insert -just- the single-step breakpoint in the
437 parent. Very awkward.
438
439 In the end, the best we can do is to make sure it
440 runs for a little while. Hopefully it will be out of
441 range of any breakpoints we reinsert. Usually this
442 is only the single-step breakpoint at vfork's return
443 point. */
444
445 usleep (10000);
446 }
447
448 /* Since we vforked, breakpoints were removed in the parent
449 too. Put them back. */
450 reattach_breakpoints (parent_pid);
451 }
452 }
453 else
454 {
455 char child_pid_spelling[40];
456
457 /* Needed to keep the breakpoint lists in sync. */
458 if (! has_vforked)
459 detach_breakpoints (child_pid);
460
461 /* Before detaching from the parent, remove all breakpoints from it. */
462 remove_breakpoints ();
463
464 if (debug_linux_nat)
465 {
466 target_terminal_ours ();
467 fprintf_filtered (gdb_stdlog,
468 "Attaching after fork to child process %d.\n",
469 child_pid);
470 }
471
472 /* If we're vforking, we may want to hold on to the parent until
473 the child exits or execs. At exec time we can remove the old
474 breakpoints from the parent and detach it; at exit time we
475 could do the same (or even, sneakily, resume debugging it - the
476 child's exec has failed, or something similar).
477
478 This doesn't clean up "properly", because we can't call
479 target_detach, but that's OK; if the current target is "child",
480 then it doesn't need any further cleanups, and lin_lwp will
481 generally not encounter vfork (vfork is defined to fork
482 in libpthread.so).
483
484 The holding part is very easy if we have VFORKDONE events;
485 but keeping track of both processes is beyond GDB at the
486 moment. So we don't expose the parent to the rest of GDB.
487 Instead we quietly hold onto it until such time as we can
488 safely resume it. */
489
490 if (has_vforked)
491 linux_parent_pid = parent_pid;
492 else if (!detach_fork)
493 {
494 struct fork_info *fp;
495 /* Retain parent fork in ptrace (stopped) state. */
496 fp = find_fork_pid (parent_pid);
497 if (!fp)
498 fp = add_fork (parent_pid);
499 fork_save_infrun_state (fp, 0);
500 }
501 else
502 {
503 target_detach (NULL, 0);
504 }
505
506 inferior_ptid = pid_to_ptid (child_pid);
507
508 /* Reinstall ourselves, since we might have been removed in
509 target_detach (which does other necessary cleanup). */
510
511 push_target (ops);
512
513 /* Reset breakpoints in the child as appropriate. */
514 follow_inferior_reset_breakpoints ();
515 }
516
517 return 0;
518 }
519
520 \f
/* Target hook: fork catchpoints need kernel fork-event reporting;
   raise an error if the running kernel cannot provide it.  */
static void
linux_child_insert_fork_catchpoint (int pid)
{
  if (linux_supports_tracefork (pid) == 0)
    error (_("Your system does not support fork catchpoints."));
}
527
/* Target hook: vfork catchpoints need kernel fork-event reporting;
   raise an error if the running kernel cannot provide it.  */
static void
linux_child_insert_vfork_catchpoint (int pid)
{
  if (linux_supports_tracefork (pid) == 0)
    error (_("Your system does not support vfork catchpoints."));
}
534
/* Target hook: exec catchpoints need kernel event reporting; raise an
   error if the running kernel cannot provide it.  */
static void
linux_child_insert_exec_catchpoint (int pid)
{
  if (linux_supports_tracefork (pid) == 0)
    error (_("Your system does not support exec catchpoints."));
}
541
542 /* On GNU/Linux there are no real LWP's. The closest thing to LWP's
543 are processes sharing the same VM space. A multi-threaded process
544 is basically a group of such processes. However, such a grouping
545 is almost entirely a user-space issue; the kernel doesn't enforce
546 such a grouping at all (this might change in the future). In
547 general, we'll rely on the threads library (i.e. the GNU/Linux
548 Threads library) to provide such a grouping.
549
550 It is perfectly well possible to write a multi-threaded application
551 without the assistance of a threads library, by using the clone
552 system call directly. This module should be able to give some
553 rudimentary support for debugging such applications if developers
554 specify the CLONE_PTRACE flag in the clone system call, and are
555 using the Linux kernel 2.4 or above.
556
557 Note that there are some peculiarities in GNU/Linux that affect
558 this code:
559
560 - In general one should specify the __WCLONE flag to waitpid in
561 order to make it report events for any of the cloned processes
562 (and leave it out for the initial process). However, if a cloned
563 process has exited the exit status is only reported if the
564 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
565 we cannot use it since GDB must work on older systems too.
566
567 - When a traced, cloned process exits and is waited for by the
568 debugger, the kernel reassigns it to the original parent and
569 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
570 library doesn't notice this, which leads to the "zombie problem":
571 When debugged a multi-threaded process that spawns a lot of
572 threads will run out of processes, even if the threads exit,
573 because the "zombies" stay around. */
574
575 /* List of known LWPs. */
576 static struct lwp_info *lwp_list;
577
578 /* Number of LWPs in the list. */
579 static int num_lwps;
580 \f
581
582 #define GET_LWP(ptid) ptid_get_lwp (ptid)
583 #define GET_PID(ptid) ptid_get_pid (ptid)
584 #define is_lwp(ptid) (GET_LWP (ptid) != 0)
585 #define BUILD_LWP(lwp, pid) ptid_build (pid, lwp, 0)
586
587 /* If the last reported event was a SIGTRAP, this variable is set to
588 the process id of the LWP/thread that got it. */
589 ptid_t trap_ptid;
590 \f
591
592 /* Since we cannot wait (in linux_nat_wait) for the initial process and
593 any cloned processes with a single call to waitpid, we have to use
594 the WNOHANG flag and call waitpid in a loop. To optimize
595 things a bit we use `sigsuspend' to wake us up when a process has
596 something to report (it will send us a SIGCHLD if it has). To make
597 this work we have to juggle with the signal mask. We save the
598 original signal mask such that we can restore it before creating a
599 new process in order to avoid blocking certain signals in the
600 inferior. We then block SIGCHLD during the waitpid/sigsuspend
601 loop. */
602
603 /* Original signal mask. */
604 static sigset_t normal_mask;
605
606 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
607 _initialize_linux_nat. */
608 static sigset_t suspend_mask;
609
610 /* Signals to block to make that sigsuspend work. */
611 static sigset_t blocked_mask;
612 \f
613
614 /* Prototypes for local functions. */
615 static int stop_wait_callback (struct lwp_info *lp, void *data);
616 static int linux_nat_thread_alive (ptid_t ptid);
617 static char *linux_child_pid_to_exec_file (int pid);
618 \f
619 /* Convert wait status STATUS to a string. Used for printing debug
620 messages only. */
621
622 static char *
623 status_to_str (int status)
624 {
625 static char buf[64];
626
627 if (WIFSTOPPED (status))
628 snprintf (buf, sizeof (buf), "%s (stopped)",
629 strsignal (WSTOPSIG (status)));
630 else if (WIFSIGNALED (status))
631 snprintf (buf, sizeof (buf), "%s (terminated)",
632 strsignal (WSTOPSIG (status)));
633 else
634 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
635
636 return buf;
637 }
638
639 /* Initialize the list of LWPs. Note that this module, contrary to
640 what GDB's generic threads layer does for its thread list,
641 re-initializes the LWP lists whenever we mourn or detach (which
642 doesn't involve mourning) the inferior. */
643
644 static void
645 init_lwp_list (void)
646 {
647 struct lwp_info *lp, *lpnext;
648
649 for (lp = lwp_list; lp; lp = lpnext)
650 {
651 lpnext = lp->next;
652 xfree (lp);
653 }
654
655 lwp_list = NULL;
656 num_lwps = 0;
657 }
658
659 /* Add the LWP specified by PID to the list. Return a pointer to the
660 structure describing the new LWP. */
661
662 static struct lwp_info *
663 add_lwp (ptid_t ptid)
664 {
665 struct lwp_info *lp;
666
667 gdb_assert (is_lwp (ptid));
668
669 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
670
671 memset (lp, 0, sizeof (struct lwp_info));
672
673 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
674
675 lp->ptid = ptid;
676
677 lp->next = lwp_list;
678 lwp_list = lp;
679 ++num_lwps;
680
681 return lp;
682 }
683
684 /* Remove the LWP specified by PID from the list. */
685
686 static void
687 delete_lwp (ptid_t ptid)
688 {
689 struct lwp_info *lp, *lpprev;
690
691 lpprev = NULL;
692
693 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
694 if (ptid_equal (lp->ptid, ptid))
695 break;
696
697 if (!lp)
698 return;
699
700 num_lwps--;
701
702 if (lpprev)
703 lpprev->next = lp->next;
704 else
705 lwp_list = lp->next;
706
707 xfree (lp);
708 }
709
710 /* Return a pointer to the structure describing the LWP corresponding
711 to PID. If no corresponding LWP could be found, return NULL. */
712
713 static struct lwp_info *
714 find_lwp_pid (ptid_t ptid)
715 {
716 struct lwp_info *lp;
717 int lwp;
718
719 if (is_lwp (ptid))
720 lwp = GET_LWP (ptid);
721 else
722 lwp = GET_PID (ptid);
723
724 for (lp = lwp_list; lp; lp = lp->next)
725 if (lwp == GET_LWP (lp->ptid))
726 return lp;
727
728 return NULL;
729 }
730
731 /* Call CALLBACK with its second argument set to DATA for every LWP in
732 the list. If CALLBACK returns 1 for a particular LWP, return a
733 pointer to the structure describing that LWP immediately.
734 Otherwise return NULL. */
735
736 struct lwp_info *
737 iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
738 {
739 struct lwp_info *lp, *lpnext;
740
741 for (lp = lwp_list; lp; lp = lpnext)
742 {
743 lpnext = lp->next;
744 if ((*callback) (lp, data))
745 return lp;
746 }
747
748 return NULL;
749 }
750
/* Update our internal state when changing from one fork (checkpoint,
   et cetera) to another indicated by NEW_PTID.  We can only switch
   single-threaded applications, so we only create one new LWP, and
   the previous list is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  init_lwp_list ();
  lp = add_lwp (new_ptid);
  /* The fork being switched to is held in ptrace-stopped state.  */
  lp->stopped = 1;
}
765
766 /* Record a PTID for later deletion. */
767
768 struct saved_ptids
769 {
770 ptid_t ptid;
771 struct saved_ptids *next;
772 };
773 static struct saved_ptids *threads_to_delete;
774
775 static void
776 record_dead_thread (ptid_t ptid)
777 {
778 struct saved_ptids *p = xmalloc (sizeof (struct saved_ptids));
779 p->ptid = ptid;
780 p->next = threads_to_delete;
781 threads_to_delete = p;
782 }
783
784 /* Delete any dead threads which are not the current thread. */
785
786 static void
787 prune_lwps (void)
788 {
789 struct saved_ptids **p = &threads_to_delete;
790
791 while (*p)
792 if (! ptid_equal ((*p)->ptid, inferior_ptid))
793 {
794 struct saved_ptids *tmp = *p;
795 delete_thread (tmp->ptid);
796 *p = tmp->next;
797 xfree (tmp);
798 }
799 else
800 p = &(*p)->next;
801 }
802
803 /* Callback for iterate_over_threads that finds a thread corresponding
804 to the given LWP. */
805
806 static int
807 find_thread_from_lwp (struct thread_info *thr, void *dummy)
808 {
809 ptid_t *ptid_p = dummy;
810
811 if (GET_LWP (thr->ptid) && GET_LWP (thr->ptid) == GET_LWP (*ptid_p))
812 return 1;
813 else
814 return 0;
815 }
816
/* Handle the exit of a single thread LP: announce the exit when the
   thread is user-visible, schedule or perform deletion of the
   corresponding GDB thread, and remove LP from the LWP list.  */

static void
exit_lwp (struct lwp_info *lp)
{
  if (in_thread_list (lp->ptid))
    {
      /* Core GDB cannot deal with us deleting the current thread.  */
      if (!ptid_equal (lp->ptid, inferior_ptid))
	delete_thread (lp->ptid);
      else
	record_dead_thread (lp->ptid);
      printf_unfiltered (_("[%s exited]\n"),
			 target_pid_to_str (lp->ptid));
    }
  else
    {
      /* Even if LP->PTID is not in the global GDB thread list, the
	 LWP may be - with an additional thread ID.  We don't need
	 to print anything in this case; thread_db is in use and
	 already took care of that.  But it didn't delete the thread
	 in order to handle zombies correctly.  */

      struct thread_info *thr;

      thr = iterate_over_threads (find_thread_from_lwp, &lp->ptid);
      if (thr)
	{
	  if (!ptid_equal (thr->ptid, inferior_ptid))
	    delete_thread (thr->ptid);
	  else
	    record_dead_thread (thr->ptid);
	}
    }

  /* In all cases, forget our own record of the LWP.  */
  delete_lwp (lp->ptid);
}
854
/* Attach to the LWP specified by PTID.  If VERBOSE is non-zero, print
   a message telling the user that a new LWP has been added to the
   process.  Return 0 if successful or -1 if the new LWP could not
   be attached.  */

int
lin_lwp_attach_lwp (ptid_t ptid, int verbose)
{
  struct lwp_info *lp;

  gdb_assert (is_lwp (ptid));

  /* Make sure SIGCHLD is blocked.  We don't want SIGCHLD events
     to interrupt either the ptrace() or waitpid() calls below.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    {
      sigaddset (&blocked_mask, SIGCHLD);
      sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
    }

  lp = find_lwp_pid (ptid);

  /* We assume that we're already attached to any LWP that has an id
     equal to the overall process id, and to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen.  */
  if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
    {
      pid_t pid;
      int status;

      if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
	{
	  /* If we fail to attach to the thread, issue a warning,
	     but continue.  One way this can happen is if thread
	     creation is interrupted; as of Linux 2.6.19, a kernel
	     bug may place threads in the thread list and then fail
	     to create them.  */
	  warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
		   safe_strerror (errno));
	  return -1;
	}

      if (lp == NULL)
	lp = add_lwp (ptid);

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
			    target_pid_to_str (ptid));

      /* Consume the SIGSTOP the attach delivered.  */
      pid = my_waitpid (GET_LWP (ptid), &status, 0);
      if (pid == -1 && errno == ECHILD)
	{
	  /* Try again with __WCLONE to check cloned processes.  */
	  pid = my_waitpid (GET_LWP (ptid), &status, __WCLONE);
	  lp->cloned = 1;
	}

      gdb_assert (pid == GET_LWP (ptid)
		  && WIFSTOPPED (status) && WSTOPSIG (status));

      target_post_attach (pid);

      lp->stopped = 1;

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "LLAL: waitpid %s received %s\n",
			      target_pid_to_str (ptid),
			      status_to_str (status));
	}
    }
  else
    {
      /* We assume that the LWP representing the original process is
         already stopped.  Mark it as stopped in the data structure
         that the GNU/linux ptrace layer uses to keep track of
         threads.  Note that this won't have already been done since
         the main thread will have, we assume, been stopped by an
         attach from a different layer.  */
      if (lp == NULL)
	lp = add_lwp (ptid);
      lp->stopped = 1;
    }

  if (verbose)
    printf_filtered (_("[New %s]\n"), target_pid_to_str (ptid));

  return 0;
}
949
/* Target to_attach implementation: attach via the single-threaded
   layer, register the main process as the first LWP, wait for it to
   stop, and queue a synthetic SIGSTOP event for core GDB.  */
static void
linux_nat_attach (char *args, int from_tty)
{
  struct lwp_info *lp;
  pid_t pid;
  int status;

  /* FIXME: We should probably accept a list of process id's, and
     attach all of them.  */
  linux_ops->to_attach (args, from_tty);

  /* Add the initial process as the first LWP to the list.  */
  inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
  lp = add_lwp (inferior_ptid);

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  pid = my_waitpid (GET_PID (inferior_ptid), &status, 0);
  if (pid == -1 && errno == ECHILD)
    {
      warning (_("%s is a cloned process"), target_pid_to_str (inferior_ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      pid = my_waitpid (GET_PID (inferior_ptid), &status, __WCLONE);
      lp->cloned = 1;
    }

  gdb_assert (pid == GET_PID (inferior_ptid)
	      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP);

  lp->stopped = 1;

  /* Fake the SIGSTOP that core GDB expects.  */
  lp->status = W_STOPCODE (SIGSTOP);
  lp->resumed = 1;
  if (debug_linux_nat)
    {
      fprintf_unfiltered (gdb_stdlog,
			  "LLA: waitpid %ld, faking SIGSTOP\n", (long) pid);
    }
}
992
/* Callback for iterate_over_lwps used by linux_nat_detach: deliver
   any pending signal to LP, then detach it (unless it is the LWP
   whose id equals the overall process id, which is detached later by
   the caller).  Always returns 0 so iteration continues.  */
static int
detach_callback (struct lwp_info *lp, void *data)
{
  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  if (debug_linux_nat && lp->status)
    fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
			strsignal (WSTOPSIG (lp->status)),
			target_pid_to_str (lp->ptid));

  /* Flush any stop signal we sent ourselves by continuing the LWP
     with its pending signal, if any.  */
  while (lp->signalled && lp->stopped)
    {
      errno = 0;
      if (ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0,
		  WSTOPSIG (lp->status)) < 0)
	error (_("Can't continue %s: %s"), target_pid_to_str (lp->ptid),
	       safe_strerror (errno));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "DC: PTRACE_CONTINUE (%s, 0, %s) (OK)\n",
			    target_pid_to_str (lp->ptid),
			    status_to_str (lp->status));

      lp->stopped = 0;
      lp->signalled = 0;
      lp->status = 0;
      /* FIXME drow/2003-08-26: There was a call to stop_wait_callback
	 here.  But since lp->signalled was cleared above,
	 stop_wait_callback didn't do anything; the process was left
	 running.  Shouldn't we be waiting for it to stop?
	 I've removed the call, since stop_wait_callback now does do
	 something when called with lp->signalled == 0.  */

      gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
    }

  /* We don't actually detach from the LWP that has an id equal to the
     overall process id just yet.  */
  if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
    {
      errno = 0;
      /* Pass along the pending stop signal, if any (WSTOPSIG of a
	 zero status is 0, i.e. no signal).  */
      if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
		  WSTOPSIG (lp->status)) < 0)
	error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
	       safe_strerror (errno));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "PTRACE_DETACH (%s, %s, 0) (OK)\n",
			    target_pid_to_str (lp->ptid),
			    strsignal (WSTOPSIG (lp->status)));

      delete_lwp (lp->ptid);
    }

  return 0;
}
1051
/* Target to_detach implementation: detach every non-main LWP, reset
   this module's per-inferior state, restore the signal mask, and
   finally let the single-threaded layer detach the main process.  */
static void
linux_nat_detach (char *args, int from_tty)
{
  iterate_over_lwps (detach_callback, NULL);

  /* Only the initial process should be left right now.  */
  gdb_assert (num_lwps == 1);

  trap_ptid = null_ptid;

  /* Destroy LWP info; it's no longer valid.  */
  init_lwp_list ();

  /* Restore the original signal mask.  */
  sigprocmask (SIG_SETMASK, &normal_mask, NULL);
  sigemptyset (&blocked_mask);

  inferior_ptid = pid_to_ptid (GET_PID (inferior_ptid));
  linux_ops->to_detach (args, from_tty);
}
1072
1073 /* Resume LP. */
1074
1075 static int
1076 resume_callback (struct lwp_info *lp, void *data)
1077 {
1078 if (lp->stopped && lp->status == 0)
1079 {
1080 struct thread_info *tp;
1081
1082 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1083 0, TARGET_SIGNAL_0);
1084 if (debug_linux_nat)
1085 fprintf_unfiltered (gdb_stdlog,
1086 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1087 target_pid_to_str (lp->ptid));
1088 lp->stopped = 0;
1089 lp->step = 0;
1090 }
1091
1092 return 0;
1093 }
1094
/* iterate_over_lwps callback: clear LP's `resumed' flag.  Always
   returns 0 so the iteration covers the whole LWP list.  */

static int
resume_clear_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 0;
  return 0;
}
1101
/* iterate_over_lwps callback: set LP's `resumed' flag.  Always
   returns 0 so the iteration covers the whole LWP list.  */

static int
resume_set_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 1;
  return 0;
}
1108
/* target_ops `to_resume' implementation.  Resume (or single-step,
   when STEP is non-zero) the LWPs selected by PTID, delivering signal
   SIGNO to the event thread.  A PTID whose pid is -1 means resume all
   LWPs; a specific PTID resumes only that LWP.  If the event LWP
   already has a pending wait status, short-circuit: leave everything
   stopped and let linux_nat_wait report the saved event.  */

static void
linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
{
  struct lwp_info *lp;
  int resume_all;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
			step ? "step" : "resume",
			target_pid_to_str (ptid),
			signo ? strsignal (signo) : "0",
			target_pid_to_str (inferior_ptid));

  prune_lwps ();

  /* A wildcard pid (-1) resumes every LWP; a specific PTID means
     `resume/step only this LWP'.  */
  resume_all = (PIDGET (ptid) == -1);

  /* Record which LWPs we intend to leave running; resumed_callback
     and status_callback consult this flag later.  */
  if (resume_all)
    iterate_over_lwps (resume_set_callback, NULL);
  else
    iterate_over_lwps (resume_clear_callback, NULL);

  /* If PID is -1, it's the current inferior that should be
     handled specially.  */
  if (PIDGET (ptid) == -1)
    ptid = inferior_ptid;

  lp = find_lwp_pid (ptid);
  if (lp)
    {
      ptid = pid_to_ptid (GET_LWP (lp->ptid));

      /* Remember if we're stepping.  */
      lp->step = step;

      /* Mark this LWP as resumed.  */
      lp->resumed = 1;

      /* If we have a pending wait status for this thread, there is no
	 point in resuming the process.  But first make sure that
	 linux_nat_wait won't preemptively handle the event - we
	 should never take this short-circuit if we are going to
	 leave LP running, since we have skipped resuming all the
	 other threads.  This bit of code needs to be synchronized
	 with linux_nat_wait.  */

      if (lp->status && WIFSTOPPED (lp->status))
	{
	  int saved_signo = target_signal_from_host (WSTOPSIG (lp->status));

	  /* A stop for a signal GDB neither stops on, prints, nor
	     blocks will be passed straight back to the inferior, so
	     it must not short-circuit the resume.  */
	  if (signal_stop_state (saved_signo) == 0
	      && signal_print_state (saved_signo) == 0
	      && signal_pass_state (saved_signo) == 1)
	    {
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LLR: Not short circuiting for ignored "
				    "status 0x%x\n", lp->status);

	      /* FIXME: What should we do if we are supposed to continue
		 this thread with a signal?  */
	      gdb_assert (signo == TARGET_SIGNAL_0);
	      signo = saved_signo;
	      lp->status = 0;
	    }
	}

      if (lp->status)
	{
	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == TARGET_SIGNAL_0);

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLR: Short circuiting for status 0x%x\n",
				lp->status);

	  /* Leave everything stopped; linux_nat_wait will report the
	     saved event.  */
	  return;
	}

      /* Mark LWP as not stopped to prevent it from being continued by
	 resume_callback.  */
      lp->stopped = 0;
    }

  if (resume_all)
    iterate_over_lwps (resume_callback, NULL);

  linux_ops->to_resume (ptid, step, signo);
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: %s %s, %s (resume event thread)\n",
			step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			target_pid_to_str (ptid),
			signo ? strsignal (signo) : "0");
}
1208
1209 /* Issue kill to specified lwp. */
1210
1211 static int tkill_failed;
1212
/* Send signal SIGNO to the LWP identified by LWPID, returning the
   result of the underlying syscall (with errno set on failure).  */

static int
kill_lwp (int lwpid, int signo)
{
  errno = 0;

#ifdef HAVE_TKILL_SYSCALL
  /* Prefer the tkill syscall, which is required to target an
     individual NPTL thread.  Once it is known to be unavailable on
     this kernel, remember that and fall back to kill from then on.  */
  if (!tkill_failed)
    {
      int result = syscall (__NR_tkill, lwpid, signo);

      if (errno != ENOSYS)
	return result;

      /* The kernel lacks tkill; don't try it again.  */
      tkill_failed = 1;
      errno = 0;
    }
#endif

  return kill (lwpid, signo);
}
1234
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  This function returns non-zero if the
   event should be ignored and we should wait again.  If STOPPING is
   true, the new LWP remains stopped, otherwise it is continued.  */

static int
linux_handle_extended_wait (struct lwp_info *lp, int status,
			    int stopping)
{
  int pid = GET_LWP (lp->ptid);
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  struct lwp_info *new_lp = NULL;
  /* PTRACE_EVENT_* codes arrive in the upper half of the waitpid
     status.  */
  int event = status >> 16;

  if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
      || event == PTRACE_EVENT_CLONE)
    {
      unsigned long new_pid;
      int ret;

      /* Ask the kernel for the pid of the new child.  */
      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */
	  ret = my_waitpid (new_pid, &status,
			    (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
	  if (ret == -1)
	    perror_with_name (_("waiting for new child"));
	  else if (ret != new_pid)
	    internal_error (__FILE__, __LINE__,
			    _("wait returned unexpected PID %d"), ret);
	  else if (!WIFSTOPPED (status))
	    internal_error (__FILE__, __LINE__,
			    _("wait returned unexpected status 0x%x"), status);
	}

      ourstatus->value.related_pid = new_pid;

      if (event == PTRACE_EVENT_FORK)
	ourstatus->kind = TARGET_WAITKIND_FORKED;
      else if (event == PTRACE_EVENT_VFORK)
	ourstatus->kind = TARGET_WAITKIND_VFORKED;
      else
	{
	  /* A clone event: track the new LWP ourselves instead of
	     reporting it to the upper layers.  */
	  ourstatus->kind = TARGET_WAITKIND_IGNORE;
	  new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (inferior_ptid)));
	  new_lp->cloned = 1;

	  if (WSTOPSIG (status) != SIGSTOP)
	    {
	      /* This can happen if someone starts sending signals to
		 the new thread before it gets a chance to run, which
		 have a lower number than SIGSTOP (e.g. SIGUSR1).
		 This is an unlikely case, and harder to handle for
		 fork / vfork than for clone, so we do not try - but
		 we handle it for clone events here.  We'll send
		 the other signal on to the thread below.  */

	      new_lp->signalled = 1;
	    }
	  else
	    status = 0;

	  if (stopping)
	    new_lp->stopped = 1;
	  else
	    {
	      new_lp->resumed = 1;
	      /* Continue the new LWP (related_pid == new_pid here),
		 forwarding the non-SIGSTOP signal it stopped with,
		 if any.  */
	      ptrace (PTRACE_CONT, lp->waitstatus.value.related_pid, 0,
		      status ? WSTOPSIG (status) : 0);
	    }

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: Got clone event from LWP %ld, resuming\n",
				GET_LWP (lp->ptid));
	  /* Also resume the LWP that reported the clone event.  */
	  ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);

	  /* Tell the caller to discard this event and wait again.  */
	  return 1;
	}

      return 0;
    }

  if (event == PTRACE_EVENT_EXEC)
    {
      ourstatus->kind = TARGET_WAITKIND_EXECD;
      ourstatus->value.execd_pathname
	= xstrdup (linux_child_pid_to_exec_file (pid));

      if (linux_parent_pid)
	{
	  /* NOTE(review): presumably this is a fork parent we were
	     still holding on to across the exec; confirm against the
	     fork-following code that sets linux_parent_pid.  */
	  detach_breakpoints (linux_parent_pid);
	  ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);

	  linux_parent_pid = 0;
	}

      return 0;
    }

  internal_error (__FILE__, __LINE__,
		  _("unknown ptrace event %d"), event);
}
1343
1344 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1345 exited. */
1346
1347 static int
1348 wait_lwp (struct lwp_info *lp)
1349 {
1350 pid_t pid;
1351 int status;
1352 int thread_dead = 0;
1353
1354 gdb_assert (!lp->stopped);
1355 gdb_assert (lp->status == 0);
1356
1357 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
1358 if (pid == -1 && errno == ECHILD)
1359 {
1360 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
1361 if (pid == -1 && errno == ECHILD)
1362 {
1363 /* The thread has previously exited. We need to delete it
1364 now because, for some vendor 2.4 kernels with NPTL
1365 support backported, there won't be an exit event unless
1366 it is the main thread. 2.6 kernels will report an exit
1367 event for each thread that exits, as expected. */
1368 thread_dead = 1;
1369 if (debug_linux_nat)
1370 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
1371 target_pid_to_str (lp->ptid));
1372 }
1373 }
1374
1375 if (!thread_dead)
1376 {
1377 gdb_assert (pid == GET_LWP (lp->ptid));
1378
1379 if (debug_linux_nat)
1380 {
1381 fprintf_unfiltered (gdb_stdlog,
1382 "WL: waitpid %s received %s\n",
1383 target_pid_to_str (lp->ptid),
1384 status_to_str (status));
1385 }
1386 }
1387
1388 /* Check if the thread has exited. */
1389 if (WIFEXITED (status) || WIFSIGNALED (status))
1390 {
1391 thread_dead = 1;
1392 if (debug_linux_nat)
1393 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
1394 target_pid_to_str (lp->ptid));
1395 }
1396
1397 if (thread_dead)
1398 {
1399 exit_lwp (lp);
1400 return 0;
1401 }
1402
1403 gdb_assert (WIFSTOPPED (status));
1404
1405 /* Handle GNU/Linux's extended waitstatus for trace events. */
1406 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1407 {
1408 if (debug_linux_nat)
1409 fprintf_unfiltered (gdb_stdlog,
1410 "WL: Handling extended status 0x%06x\n",
1411 status);
1412 if (linux_handle_extended_wait (lp, status, 1))
1413 return wait_lwp (lp);
1414 }
1415
1416 return status;
1417 }
1418
1419 /* Send a SIGSTOP to LP. */
1420
1421 static int
1422 stop_callback (struct lwp_info *lp, void *data)
1423 {
1424 if (!lp->stopped && !lp->signalled)
1425 {
1426 int ret;
1427
1428 if (debug_linux_nat)
1429 {
1430 fprintf_unfiltered (gdb_stdlog,
1431 "SC: kill %s **<SIGSTOP>**\n",
1432 target_pid_to_str (lp->ptid));
1433 }
1434 errno = 0;
1435 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
1436 if (debug_linux_nat)
1437 {
1438 fprintf_unfiltered (gdb_stdlog,
1439 "SC: lwp kill %d %s\n",
1440 ret,
1441 errno ? safe_strerror (errno) : "ERRNO-OK");
1442 }
1443
1444 lp->signalled = 1;
1445 gdb_assert (lp->status == 0);
1446 }
1447
1448 return 0;
1449 }
1450
/* Wait until LP is stopped.  If DATA is non-null it is interpreted as
   a pointer to a set of signals to be flushed immediately.

   Signals other than the expected SIGSTOP are held in lp->status (or
   re-queued to the LWP if lp->status is already occupied) so that
   linux_nat_wait can report them later.  Always returns 0.  */

static int
stop_wait_callback (struct lwp_info *lp, void *data)
{
  sigset_t *flush_mask = data;

  if (!lp->stopped)
    {
      int status;

      status = wait_lwp (lp);
      if (status == 0)
	/* The LWP exited; wait_lwp already deleted it.  */
	return 0;

      /* Ignore any signals in FLUSH_MASK.  */
      if (flush_mask && sigismember (flush_mask, WSTOPSIG (status)))
	{
	  /* If we didn't send this LWP a SIGSTOP ourselves, the
	     flushed signal is the stop we were waiting for.  */
	  if (!lp->signalled)
	    {
	      lp->stopped = 1;
	      return 0;
	    }

	  /* Otherwise discard the signal, continue the LWP, and keep
	     waiting for the SIGSTOP we sent.  */
	  errno = 0;
	  ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"PTRACE_CONT %s, 0, 0 (%s)\n",
				target_pid_to_str (lp->ptid),
				errno ? safe_strerror (errno) : "OK");

	  return stop_wait_callback (lp, flush_mask);
	}

      if (WSTOPSIG (status) != SIGSTOP)
	{
	  if (WSTOPSIG (status) == SIGTRAP)
	    {
	      /* If a LWP other than the LWP that we're reporting an
		 event for has hit a GDB breakpoint (as opposed to
		 some random trap signal), then just arrange for it to
		 hit it again later.  We don't keep the SIGTRAP status
		 and don't forward the SIGTRAP signal to the LWP.  We
		 will handle the current event, eventually we will
		 resume all LWPs, and this one will get its breakpoint
		 trap again.

		 If we do not do this, then we run the risk that the
		 user will delete or disable the breakpoint, but the
		 thread will have already tripped on it.  */

	      /* Now resume this LWP and get the SIGSTOP event.  */
	      errno = 0;
	      ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
	      if (debug_linux_nat)
		{
		  fprintf_unfiltered (gdb_stdlog,
				      "PTRACE_CONT %s, 0, 0 (%s)\n",
				      target_pid_to_str (lp->ptid),
				      errno ? safe_strerror (errno) : "OK");

		  fprintf_unfiltered (gdb_stdlog,
				      "SWC: Candidate SIGTRAP event in %s\n",
				      target_pid_to_str (lp->ptid));
		}
	      /* Hold the SIGTRAP for handling by linux_nat_wait.  */
	      stop_wait_callback (lp, data);
	      /* If there's another event, throw it back into the queue.  */
	      if (lp->status)
		{
		  if (debug_linux_nat)
		    {
		      /* NOTE(review): this message prints STATUS (the
			 SIGTRAP) but the signal re-queued below comes
			 from lp->status - confirm which was intended.  */
		      fprintf_unfiltered (gdb_stdlog,
					  "SWC: kill %s, %s\n",
					  target_pid_to_str (lp->ptid),
					  status_to_str ((int) status));
		    }
		  kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
		}
	      /* Save the sigtrap event.  */
	      lp->status = status;
	      return 0;
	    }
	  else
	    {
	      /* The thread was stopped with a signal other than
		 SIGSTOP, and didn't accidentally trip a breakpoint.  */

	      if (debug_linux_nat)
		{
		  fprintf_unfiltered (gdb_stdlog,
				      "SWC: Pending event %s in %s\n",
				      status_to_str ((int) status),
				      target_pid_to_str (lp->ptid));
		}
	      /* Now resume this LWP and get the SIGSTOP event.  */
	      errno = 0;
	      ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
				    target_pid_to_str (lp->ptid),
				    errno ? safe_strerror (errno) : "OK");

	      /* Hold this event/waitstatus while we check to see if
		 there are any more (we still want to get that SIGSTOP).  */
	      stop_wait_callback (lp, data);
	      /* If the lp->status field is still empty, use it to hold
		 this event.  If not, then this event must be returned
		 to the event queue of the LWP.  */
	      if (lp->status == 0)
		lp->status = status;
	      else
		{
		  if (debug_linux_nat)
		    {
		      fprintf_unfiltered (gdb_stdlog,
					  "SWC: kill %s, %s\n",
					  target_pid_to_str (lp->ptid),
					  status_to_str ((int) status));
		    }
		  kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
		}
	      return 0;
	    }
	}
      else
	{
	  /* We caught the SIGSTOP that we intended to catch, so
	     there's no SIGSTOP pending.  */
	  lp->stopped = 1;
	  lp->signalled = 0;
	}
    }

  return 0;
}
1590
1591 /* Check whether PID has any pending signals in FLUSH_MASK. If so set
1592 the appropriate bits in PENDING, and return 1 - otherwise return 0. */
1593
1594 static int
1595 linux_nat_has_pending (int pid, sigset_t *pending, sigset_t *flush_mask)
1596 {
1597 sigset_t blocked, ignored;
1598 int i;
1599
1600 linux_proc_pending_signals (pid, pending, &blocked, &ignored);
1601
1602 if (!flush_mask)
1603 return 0;
1604
1605 for (i = 1; i < NSIG; i++)
1606 if (sigismember (pending, i))
1607 if (!sigismember (flush_mask, i)
1608 || sigismember (&blocked, i)
1609 || sigismember (&ignored, i))
1610 sigdelset (pending, i);
1611
1612 if (sigisemptyset (pending))
1613 return 0;
1614
1615 return 1;
1616 }
1617
1618 /* DATA is interpreted as a mask of signals to flush. If LP has
1619 signals pending, and they are all in the flush mask, then arrange
1620 to flush them. LP should be stopped, as should all other threads
1621 it might share a signal queue with. */
1622
1623 static int
1624 flush_callback (struct lwp_info *lp, void *data)
1625 {
1626 sigset_t *flush_mask = data;
1627 sigset_t pending, intersection, blocked, ignored;
1628 int pid, status;
1629
1630 /* Normally, when an LWP exits, it is removed from the LWP list. The
1631 last LWP isn't removed till later, however. So if there is only
1632 one LWP on the list, make sure it's alive. */
1633 if (lwp_list == lp && lp->next == NULL)
1634 if (!linux_nat_thread_alive (lp->ptid))
1635 return 0;
1636
1637 /* Just because the LWP is stopped doesn't mean that new signals
1638 can't arrive from outside, so this function must be careful of
1639 race conditions. However, because all threads are stopped, we
1640 can assume that the pending mask will not shrink unless we resume
1641 the LWP, and that it will then get another signal. We can't
1642 control which one, however. */
1643
1644 if (lp->status)
1645 {
1646 if (debug_linux_nat)
1647 printf_unfiltered (_("FC: LP has pending status %06x\n"), lp->status);
1648 if (WIFSTOPPED (lp->status) && sigismember (flush_mask, WSTOPSIG (lp->status)))
1649 lp->status = 0;
1650 }
1651
1652 /* While there is a pending signal we would like to flush, continue
1653 the inferior and collect another signal. But if there's already
1654 a saved status that we don't want to flush, we can't resume the
1655 inferior - if it stopped for some other reason we wouldn't have
1656 anywhere to save the new status. In that case, we must leave the
1657 signal unflushed (and possibly generate an extra SIGINT stop).
1658 That's much less bad than losing a signal. */
1659 while (lp->status == 0
1660 && linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask))
1661 {
1662 int ret;
1663
1664 errno = 0;
1665 ret = ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1666 if (debug_linux_nat)
1667 fprintf_unfiltered (gdb_stderr,
1668 "FC: Sent PTRACE_CONT, ret %d %d\n", ret, errno);
1669
1670 lp->stopped = 0;
1671 stop_wait_callback (lp, flush_mask);
1672 if (debug_linux_nat)
1673 fprintf_unfiltered (gdb_stderr,
1674 "FC: Wait finished; saved status is %d\n",
1675 lp->status);
1676 }
1677
1678 return 0;
1679 }
1680
/* Return non-zero if LP has a wait status pending.  Used with
   iterate_over_lwps to pick the first LWP with a reportable event.  */

static int
status_callback (struct lwp_info *lp, void *data)
{
  /* Only report a pending wait status if we pretend that this has
     indeed been resumed.  */
  return (lp->status != 0 && lp->resumed);
}
1690
/* Return non-zero if LP isn't stopped - i.e. it is running, or it is
   stopped but carries a pending event that still counts as activity.  */

static int
running_callback (struct lwp_info *lp, void *data)
{
  return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
}
1698
1699 /* Count the LWP's that have had events. */
1700
1701 static int
1702 count_events_callback (struct lwp_info *lp, void *data)
1703 {
1704 int *count = data;
1705
1706 gdb_assert (count != NULL);
1707
1708 /* Count only LWPs that have a SIGTRAP event pending. */
1709 if (lp->status != 0
1710 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1711 (*count)++;
1712
1713 return 0;
1714 }
1715
1716 /* Select the LWP (if any) that is currently being single-stepped. */
1717
1718 static int
1719 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
1720 {
1721 if (lp->step && lp->status != 0)
1722 return 1;
1723 else
1724 return 0;
1725 }
1726
1727 /* Select the Nth LWP that has had a SIGTRAP event. */
1728
1729 static int
1730 select_event_lwp_callback (struct lwp_info *lp, void *data)
1731 {
1732 int *selector = data;
1733
1734 gdb_assert (selector != NULL);
1735
1736 /* Select only LWPs that have a SIGTRAP event pending. */
1737 if (lp->status != 0
1738 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1739 if ((*selector)-- == 0)
1740 return 1;
1741
1742 return 0;
1743 }
1744
1745 static int
1746 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
1747 {
1748 struct lwp_info *event_lp = data;
1749
1750 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1751 if (lp == event_lp)
1752 return 0;
1753
1754 /* If a LWP other than the LWP that we're reporting an event for has
1755 hit a GDB breakpoint (as opposed to some random trap signal),
1756 then just arrange for it to hit it again later. We don't keep
1757 the SIGTRAP status and don't forward the SIGTRAP signal to the
1758 LWP. We will handle the current event, eventually we will resume
1759 all LWPs, and this one will get its breakpoint trap again.
1760
1761 If we do not do this, then we run the risk that the user will
1762 delete or disable the breakpoint, but the LWP will have already
1763 tripped on it. */
1764
1765 if (lp->status != 0
1766 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
1767 && breakpoint_inserted_here_p (read_pc_pid (lp->ptid) -
1768 gdbarch_decr_pc_after_break
1769 (current_gdbarch)))
1770 {
1771 if (debug_linux_nat)
1772 fprintf_unfiltered (gdb_stdlog,
1773 "CBC: Push back breakpoint for %s\n",
1774 target_pid_to_str (lp->ptid));
1775
1776 /* Back up the PC if necessary. */
1777 if (gdbarch_decr_pc_after_break (current_gdbarch))
1778 write_pc_pid (read_pc_pid (lp->ptid) - gdbarch_decr_pc_after_break
1779 (current_gdbarch),
1780 lp->ptid);
1781
1782 /* Throw away the SIGTRAP. */
1783 lp->status = 0;
1784 }
1785
1786 return 0;
1787 }
1788
/* Select one LWP out of those that have events pending.  On entry
   *ORIG_LP/*STATUS describe the event that was actually received; on
   return they describe the elected LWP, whose saved status has been
   flushed from its lwp_info.  */

static void
select_event_lwp (struct lwp_info **orig_lp, int *status)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp;

  /* Record the wait status for the original LWP.  */
  (*orig_lp)->status = *status;

  /* Give preference to any LWP that is being single-stepped.  */
  event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
  if (event_lp != NULL)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "SEL: Select single-step %s\n",
			    target_pid_to_str (event_lp->ptid));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had SIGTRAP events.  */

      /* First see how many SIGTRAP events we have.  */
      iterate_over_lwps (count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
      random_selector = (int)
	((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_linux_nat && num_events > 1)
	fprintf_unfiltered (gdb_stdlog,
			    "SEL: Found %d SIGTRAP events, selecting #%d\n",
			    num_events, random_selector);

      event_lp = iterate_over_lwps (select_event_lwp_callback,
				    &random_selector);
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
      *status = event_lp->status;
    }

  /* Flush the wait status for the event LWP.  Note *ORIG_LP may have
     been switched just above, so this clears whichever LWP was
     elected (the unelected LWPs keep their saved statuses).  */
  (*orig_lp)->status = 0;
}
1841
/* Return non-zero if LP has been resumed.  Used with
   iterate_over_lwps to assert that at least one LWP is running.  */

static int
resumed_callback (struct lwp_info *lp, void *data)
{
  return lp->resumed;
}
1849
1850 /* Stop an active thread, verify it still exists, then resume it. */
1851
1852 static int
1853 stop_and_resume_callback (struct lwp_info *lp, void *data)
1854 {
1855 struct lwp_info *ptr;
1856
1857 if (!lp->stopped && !lp->signalled)
1858 {
1859 stop_callback (lp, NULL);
1860 stop_wait_callback (lp, NULL);
1861 /* Resume if the lwp still exists. */
1862 for (ptr = lwp_list; ptr; ptr = ptr->next)
1863 if (lp == ptr)
1864 {
1865 resume_callback (lp, NULL);
1866 resume_set_callback (lp, NULL);
1867 }
1868 }
1869 return 0;
1870 }
1871
1872 static ptid_t
1873 linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
1874 {
1875 struct lwp_info *lp = NULL;
1876 int options = 0;
1877 int status = 0;
1878 pid_t pid = PIDGET (ptid);
1879 sigset_t flush_mask;
1880
1881 /* The first time we get here after starting a new inferior, we may
1882 not have added it to the LWP list yet - this is the earliest
1883 moment at which we know its PID. */
1884 if (num_lwps == 0)
1885 {
1886 gdb_assert (!is_lwp (inferior_ptid));
1887
1888 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
1889 GET_PID (inferior_ptid));
1890 lp = add_lwp (inferior_ptid);
1891 lp->resumed = 1;
1892 }
1893
1894 sigemptyset (&flush_mask);
1895
1896 /* Make sure SIGCHLD is blocked. */
1897 if (!sigismember (&blocked_mask, SIGCHLD))
1898 {
1899 sigaddset (&blocked_mask, SIGCHLD);
1900 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
1901 }
1902
1903 retry:
1904
1905 /* Make sure there is at least one LWP that has been resumed. */
1906 gdb_assert (iterate_over_lwps (resumed_callback, NULL));
1907
1908 /* First check if there is a LWP with a wait status pending. */
1909 if (pid == -1)
1910 {
1911 /* Any LWP that's been resumed will do. */
1912 lp = iterate_over_lwps (status_callback, NULL);
1913 if (lp)
1914 {
1915 status = lp->status;
1916 lp->status = 0;
1917
1918 if (debug_linux_nat && status)
1919 fprintf_unfiltered (gdb_stdlog,
1920 "LLW: Using pending wait status %s for %s.\n",
1921 status_to_str (status),
1922 target_pid_to_str (lp->ptid));
1923 }
1924
      /* But if we don't find one, we'll have to wait, and check both
1926 cloned and uncloned processes. We start with the cloned
1927 processes. */
1928 options = __WCLONE | WNOHANG;
1929 }
1930 else if (is_lwp (ptid))
1931 {
1932 if (debug_linux_nat)
1933 fprintf_unfiltered (gdb_stdlog,
1934 "LLW: Waiting for specific LWP %s.\n",
1935 target_pid_to_str (ptid));
1936
1937 /* We have a specific LWP to check. */
1938 lp = find_lwp_pid (ptid);
1939 gdb_assert (lp);
1940 status = lp->status;
1941 lp->status = 0;
1942
1943 if (debug_linux_nat && status)
1944 fprintf_unfiltered (gdb_stdlog,
1945 "LLW: Using pending wait status %s for %s.\n",
1946 status_to_str (status),
1947 target_pid_to_str (lp->ptid));
1948
1949 /* If we have to wait, take into account whether PID is a cloned
1950 process or not. And we have to convert it to something that
1951 the layer beneath us can understand. */
1952 options = lp->cloned ? __WCLONE : 0;
1953 pid = GET_LWP (ptid);
1954 }
1955
1956 if (status && lp->signalled)
1957 {
1958 /* A pending SIGSTOP may interfere with the normal stream of
1959 events. In a typical case where interference is a problem,
1960 we have a SIGSTOP signal pending for LWP A while
1961 single-stepping it, encounter an event in LWP B, and take the
1962 pending SIGSTOP while trying to stop LWP A. After processing
1963 the event in LWP B, LWP A is continued, and we'll never see
1964 the SIGTRAP associated with the last time we were
1965 single-stepping LWP A. */
1966
1967 /* Resume the thread. It should halt immediately returning the
1968 pending SIGSTOP. */
1969 registers_changed ();
1970 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1971 lp->step, TARGET_SIGNAL_0);
1972 if (debug_linux_nat)
1973 fprintf_unfiltered (gdb_stdlog,
1974 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
1975 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1976 target_pid_to_str (lp->ptid));
1977 lp->stopped = 0;
1978 gdb_assert (lp->resumed);
1979
1980 /* This should catch the pending SIGSTOP. */
1981 stop_wait_callback (lp, NULL);
1982 }
1983
1984 set_sigint_trap (); /* Causes SIGINT to be passed on to the
1985 attached process. */
1986 set_sigio_trap ();
1987
1988 while (status == 0)
1989 {
1990 pid_t lwpid;
1991
1992 lwpid = my_waitpid (pid, &status, options);
1993 if (lwpid > 0)
1994 {
1995 gdb_assert (pid == -1 || lwpid == pid);
1996
1997 if (debug_linux_nat)
1998 {
1999 fprintf_unfiltered (gdb_stdlog,
2000 "LLW: waitpid %ld received %s\n",
2001 (long) lwpid, status_to_str (status));
2002 }
2003
2004 lp = find_lwp_pid (pid_to_ptid (lwpid));
2005
2006 /* Check for stop events reported by a process we didn't
2007 already know about - anything not already in our LWP
2008 list.
2009
2010 If we're expecting to receive stopped processes after
2011 fork, vfork, and clone events, then we'll just add the
2012 new one to our list and go back to waiting for the event
2013 to be reported - the stopped process might be returned
2014 from waitpid before or after the event is. */
2015 if (WIFSTOPPED (status) && !lp)
2016 {
2017 linux_record_stopped_pid (lwpid, status);
2018 status = 0;
2019 continue;
2020 }
2021
2022 /* Make sure we don't report an event for the exit of an LWP not in
2023 our list, i.e. not part of the current process. This can happen
	     if we detach from a program we originally forked and then it
2025 exits. */
2026 if (!WIFSTOPPED (status) && !lp)
2027 {
2028 status = 0;
2029 continue;
2030 }
2031
2032 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2033 CLONE_PTRACE processes which do not use the thread library -
2034 otherwise we wouldn't find the new LWP this way. That doesn't
2035 currently work, and the following code is currently unreachable
2036 due to the two blocks above. If it's fixed some day, this code
2037 should be broken out into a function so that we can also pick up
2038 LWPs from the new interface. */
2039 if (!lp)
2040 {
2041 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
2042 if (options & __WCLONE)
2043 lp->cloned = 1;
2044
2045 gdb_assert (WIFSTOPPED (status)
2046 && WSTOPSIG (status) == SIGSTOP);
2047 lp->signalled = 1;
2048
2049 if (!in_thread_list (inferior_ptid))
2050 {
2051 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2052 GET_PID (inferior_ptid));
2053 add_thread (inferior_ptid);
2054 }
2055
2056 add_thread (lp->ptid);
2057 printf_unfiltered (_("[New %s]\n"),
2058 target_pid_to_str (lp->ptid));
2059 }
2060
2061 /* Handle GNU/Linux's extended waitstatus for trace events. */
2062 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2063 {
2064 if (debug_linux_nat)
2065 fprintf_unfiltered (gdb_stdlog,
2066 "LLW: Handling extended status 0x%06x\n",
2067 status);
2068 if (linux_handle_extended_wait (lp, status, 0))
2069 {
2070 status = 0;
2071 continue;
2072 }
2073 }
2074
2075 /* Check if the thread has exited. */
2076 if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
2077 {
2078 /* If this is the main thread, we must stop all threads and
2079 verify if they are still alive. This is because in the nptl
2080 thread model, there is no signal issued for exiting LWPs
2081 other than the main thread. We only get the main thread
2082 exit signal once all child threads have already exited.
2083 If we stop all the threads and use the stop_wait_callback
2084 to check if they have exited we can determine whether this
2085 signal should be ignored or whether it means the end of the
2086 debugged application, regardless of which threading model
2087 is being used. */
2088 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
2089 {
2090 lp->stopped = 1;
2091 iterate_over_lwps (stop_and_resume_callback, NULL);
2092 }
2093
2094 if (debug_linux_nat)
2095 fprintf_unfiltered (gdb_stdlog,
2096 "LLW: %s exited.\n",
2097 target_pid_to_str (lp->ptid));
2098
2099 exit_lwp (lp);
2100
2101 /* If there is at least one more LWP, then the exit signal
2102 was not the end of the debugged application and should be
2103 ignored. */
2104 if (num_lwps > 0)
2105 {
2106 /* Make sure there is at least one thread running. */
2107 gdb_assert (iterate_over_lwps (running_callback, NULL));
2108
2109 /* Discard the event. */
2110 status = 0;
2111 continue;
2112 }
2113 }
2114
2115 /* Check if the current LWP has previously exited. In the nptl
2116 thread model, LWPs other than the main thread do not issue
2117 signals when they exit so we must check whenever the thread
2118 has stopped. A similar check is made in stop_wait_callback(). */
2119 if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
2120 {
2121 if (debug_linux_nat)
2122 fprintf_unfiltered (gdb_stdlog,
2123 "LLW: %s exited.\n",
2124 target_pid_to_str (lp->ptid));
2125
2126 exit_lwp (lp);
2127
2128 /* Make sure there is at least one thread running. */
2129 gdb_assert (iterate_over_lwps (running_callback, NULL));
2130
2131 /* Discard the event. */
2132 status = 0;
2133 continue;
2134 }
2135
2136 /* Make sure we don't report a SIGSTOP that we sent
2137 ourselves in an attempt to stop an LWP. */
2138 if (lp->signalled
2139 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2140 {
2141 if (debug_linux_nat)
2142 fprintf_unfiltered (gdb_stdlog,
2143 "LLW: Delayed SIGSTOP caught for %s.\n",
2144 target_pid_to_str (lp->ptid));
2145
2146 /* This is a delayed SIGSTOP. */
2147 lp->signalled = 0;
2148
2149 registers_changed ();
2150 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2151 lp->step, TARGET_SIGNAL_0);
2152 if (debug_linux_nat)
2153 fprintf_unfiltered (gdb_stdlog,
2154 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2155 lp->step ?
2156 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2157 target_pid_to_str (lp->ptid));
2158
2159 lp->stopped = 0;
2160 gdb_assert (lp->resumed);
2161
2162 /* Discard the event. */
2163 status = 0;
2164 continue;
2165 }
2166
2167 break;
2168 }
2169
2170 if (pid == -1)
2171 {
2172 /* Alternate between checking cloned and uncloned processes. */
2173 options ^= __WCLONE;
2174
2175 /* And suspend every time we have checked both. */
2176 if (options & __WCLONE)
2177 sigsuspend (&suspend_mask);
2178 }
2179
2180 /* We shouldn't end up here unless we want to try again. */
2181 gdb_assert (status == 0);
2182 }
2183
2184 clear_sigio_trap ();
2185 clear_sigint_trap ();
2186
2187 gdb_assert (lp);
2188
2189 /* Don't report signals that GDB isn't interested in, such as
2190 signals that are neither printed nor stopped upon. Stopping all
2191 threads can be a bit time-consuming so if we want decent
2192 performance with heavily multi-threaded programs, especially when
2193 they're using a high frequency timer, we'd better avoid it if we
2194 can. */
2195
2196 if (WIFSTOPPED (status))
2197 {
2198 int signo = target_signal_from_host (WSTOPSIG (status));
2199
2200 /* If we get a signal while single-stepping, we may need special
2201 care, e.g. to skip the signal handler. Defer to common code. */
2202 if (!lp->step
2203 && signal_stop_state (signo) == 0
2204 && signal_print_state (signo) == 0
2205 && signal_pass_state (signo) == 1)
2206 {
2207 /* FIMXE: kettenis/2001-06-06: Should we resume all threads
2208 here? It is not clear we should. GDB may not expect
2209 other threads to run. On the other hand, not resuming
2210 newly attached threads may cause an unwanted delay in
2211 getting them running. */
2212 registers_changed ();
2213 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2214 lp->step, signo);
2215 if (debug_linux_nat)
2216 fprintf_unfiltered (gdb_stdlog,
2217 "LLW: %s %s, %s (preempt 'handle')\n",
2218 lp->step ?
2219 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2220 target_pid_to_str (lp->ptid),
2221 signo ? strsignal (signo) : "0");
2222 lp->stopped = 0;
2223 status = 0;
2224 goto retry;
2225 }
2226
2227 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
2228 {
2229 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2230 forwarded to the entire process group, that is, all LWP's
2231 will receive it. Since we only want to report it once,
2232 we try to flush it from all LWPs except this one. */
2233 sigaddset (&flush_mask, SIGINT);
2234 }
2235 }
2236
2237 /* This LWP is stopped now. */
2238 lp->stopped = 1;
2239
2240 if (debug_linux_nat)
2241 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
2242 status_to_str (status), target_pid_to_str (lp->ptid));
2243
2244 /* Now stop all other LWP's ... */
2245 iterate_over_lwps (stop_callback, NULL);
2246
2247 /* ... and wait until all of them have reported back that they're no
2248 longer running. */
2249 iterate_over_lwps (stop_wait_callback, &flush_mask);
2250 iterate_over_lwps (flush_callback, &flush_mask);
2251
2252 /* If we're not waiting for a specific LWP, choose an event LWP from
2253 among those that have had events. Giving equal priority to all
2254 LWPs that have had events helps prevent starvation. */
2255 if (pid == -1)
2256 select_event_lwp (&lp, &status);
2257
2258 /* Now that we've selected our final event LWP, cancel any
2259 breakpoints in other LWPs that have hit a GDB breakpoint. See
2260 the comment in cancel_breakpoints_callback to find out why. */
2261 iterate_over_lwps (cancel_breakpoints_callback, lp);
2262
2263 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2264 {
2265 trap_ptid = lp->ptid;
2266 if (debug_linux_nat)
2267 fprintf_unfiltered (gdb_stdlog,
2268 "LLW: trap_ptid is %s.\n",
2269 target_pid_to_str (trap_ptid));
2270 }
2271 else
2272 trap_ptid = null_ptid;
2273
2274 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2275 {
2276 *ourstatus = lp->waitstatus;
2277 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
2278 }
2279 else
2280 store_waitstatus (ourstatus, status);
2281
2282 return lp->ptid;
2283 }
2284
2285 static int
2286 kill_callback (struct lwp_info *lp, void *data)
2287 {
2288 errno = 0;
2289 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
2290 if (debug_linux_nat)
2291 fprintf_unfiltered (gdb_stdlog,
2292 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
2293 target_pid_to_str (lp->ptid),
2294 errno ? safe_strerror (errno) : "OK");
2295
2296 return 0;
2297 }
2298
/* Callback for iterate_over_lwps: after LP has been sent PTRACE_KILL,
   reap it and drain any queued wait events so that a stale SIGSTOP or
   SIGTRAP cannot leak into a later debugging session.  Always returns
   0 so iteration continues.  */

static int
kill_wait_callback (struct lwp_info *lp, void *data)
{
  pid_t pid;

  /* We must make sure that there are no pending events (delayed
     SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
     program doesn't interfere with any following debugging session.  */

  /* For cloned processes we must check both with __WCLONE and
     without, since the exit status of a cloned process isn't reported
     with __WCLONE.  */
  if (lp->cloned)
    {
      do
	{
	  /* Keep collecting events until waitpid stops returning this
	     LWP's id; the loop body only logs, reaping is the side
	     effect of the call itself.  */
	  pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
	  if (pid != (pid_t) -1 && debug_linux_nat)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "KWC: wait %s received unknown.\n",
				  target_pid_to_str (lp->ptid));
	    }
	}
      while (pid == GET_LWP (lp->ptid));

      /* Once the LWP is fully reaped, waitpid must fail with ECHILD.  */
      gdb_assert (pid == -1 && errno == ECHILD);
    }

  /* Same drain loop, this time without __WCLONE.  */
  do
    {
      pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
      if (pid != (pid_t) -1 && debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "KWC: wait %s received unk.\n",
			      target_pid_to_str (lp->ptid));
	}
    }
  while (pid == GET_LWP (lp->ptid));

  gdb_assert (pid == -1 && errno == ECHILD);
  return 0;
}
2343
2344 static void
2345 linux_nat_kill (void)
2346 {
2347 struct target_waitstatus last;
2348 ptid_t last_ptid;
2349 int status;
2350
2351 /* If we're stopped while forking and we haven't followed yet,
2352 kill the other task. We need to do this first because the
2353 parent will be sleeping if this is a vfork. */
2354
2355 get_last_target_status (&last_ptid, &last);
2356
2357 if (last.kind == TARGET_WAITKIND_FORKED
2358 || last.kind == TARGET_WAITKIND_VFORKED)
2359 {
2360 ptrace (PT_KILL, last.value.related_pid, 0, 0);
2361 wait (&status);
2362 }
2363
2364 if (forks_exist_p ())
2365 linux_fork_killall ();
2366 else
2367 {
2368 /* Kill all LWP's ... */
2369 iterate_over_lwps (kill_callback, NULL);
2370
2371 /* ... and wait until we've flushed all events. */
2372 iterate_over_lwps (kill_wait_callback, NULL);
2373 }
2374
2375 target_mourn_inferior ();
2376 }
2377
2378 static void
2379 linux_nat_mourn_inferior (void)
2380 {
2381 trap_ptid = null_ptid;
2382
2383 /* Destroy LWP info; it's no longer valid. */
2384 init_lwp_list ();
2385
2386 /* Restore the original signal mask. */
2387 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
2388 sigemptyset (&blocked_mask);
2389
2390 if (! forks_exist_p ())
2391 /* Normal case, no other forks available. */
2392 linux_ops->to_mourn_inferior ();
2393 else
2394 /* Multi-fork case. The current inferior_ptid has exited, but
2395 there are other viable forks to debug. Delete the exiting
2396 one and context-switch to the first available. */
2397 linux_fork_mourn_inferior ();
2398 }
2399
2400 static LONGEST
2401 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
2402 const char *annex, gdb_byte *readbuf,
2403 const gdb_byte *writebuf,
2404 ULONGEST offset, LONGEST len)
2405 {
2406 struct cleanup *old_chain = save_inferior_ptid ();
2407 LONGEST xfer;
2408
2409 if (is_lwp (inferior_ptid))
2410 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
2411
2412 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
2413 offset, len);
2414
2415 do_cleanups (old_chain);
2416 return xfer;
2417 }
2418
2419 static int
2420 linux_nat_thread_alive (ptid_t ptid)
2421 {
2422 gdb_assert (is_lwp (ptid));
2423
2424 errno = 0;
2425 ptrace (PTRACE_PEEKUSER, GET_LWP (ptid), 0, 0);
2426 if (debug_linux_nat)
2427 fprintf_unfiltered (gdb_stdlog,
2428 "LLTA: PTRACE_PEEKUSER %s, 0, 0 (%s)\n",
2429 target_pid_to_str (ptid),
2430 errno ? safe_strerror (errno) : "OK");
2431
2432 /* Not every Linux kernel implements PTRACE_PEEKUSER. But we can
2433 handle that case gracefully since ptrace will first do a lookup
2434 for the process based upon the passed-in pid. If that fails we
2435 will get either -ESRCH or -EPERM, otherwise the child exists and
2436 is alive. */
2437 if (errno == ESRCH || errno == EPERM)
2438 return 0;
2439
2440 return 1;
2441 }
2442
2443 static char *
2444 linux_nat_pid_to_str (ptid_t ptid)
2445 {
2446 static char buf[64];
2447
2448 if (lwp_list && lwp_list->next && is_lwp (ptid))
2449 {
2450 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
2451 return buf;
2452 }
2453
2454 return normal_pid_to_str (ptid);
2455 }
2456
/* SIGCHLD handler.  Deliberately empty: its only purpose is to give
   SIGCHLD a non-default disposition so that sigsuspend in
   linux_nat_wait above returns when a SIGCHLD arrives.  */

static void
sigchld_handler (int signo)
{
  /* Do nothing.  The only reason for this handler is that it allows
     us to use sigsuspend in linux_nat_wait above to wait for the
     arrival of a SIGCHLD.  */
}
2464
2465 /* Accepts an integer PID; Returns a string representing a file that
2466 can be opened to get the symbols for the child process. */
2467
2468 static char *
2469 linux_child_pid_to_exec_file (int pid)
2470 {
2471 char *name1, *name2;
2472
2473 name1 = xmalloc (MAXPATHLEN);
2474 name2 = xmalloc (MAXPATHLEN);
2475 make_cleanup (xfree, name1);
2476 make_cleanup (xfree, name2);
2477 memset (name2, 0, MAXPATHLEN);
2478
2479 sprintf (name1, "/proc/%d/exe", pid);
2480 if (readlink (name1, name2, MAXPATHLEN) > 0)
2481 return name2;
2482 else
2483 return name1;
2484 }
2485
2486 /* Service function for corefiles and info proc. */
2487
2488 static int
2489 read_mapping (FILE *mapfile,
2490 long long *addr,
2491 long long *endaddr,
2492 char *permissions,
2493 long long *offset,
2494 char *device, long long *inode, char *filename)
2495 {
2496 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
2497 addr, endaddr, permissions, offset, device, inode);
2498
2499 filename[0] = '\0';
2500 if (ret > 0 && ret != EOF)
2501 {
2502 /* Eat everything up to EOL for the filename. This will prevent
2503 weird filenames (such as one with embedded whitespace) from
2504 confusing this code. It also makes this code more robust in
2505 respect to annotations the kernel may add after the filename.
2506
2507 Note the filename is used for informational purposes
2508 only. */
2509 ret += fscanf (mapfile, "%[^\n]\n", filename);
2510 }
2511
2512 return (ret != 0 && ret != EOF);
2513 }
2514
/* Fills the "to_find_memory_regions" target vector.  Lists the memory
   regions in the inferior for a corefile.  Parses /proc/PID/maps and
   invokes FUNC once per mapping with (start address, size, readable,
   writable, executable, OBFD); FUNC is expected to create the
   corresponding corefile segment.  Always returns 0.  */

static int
linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
					    unsigned long,
					    int, int, int, void *), void *obfd)
{
  long long pid = PIDGET (inferior_ptid);
  char mapsfilename[MAXPATHLEN];
  FILE *mapsfile;
  long long addr, endaddr, size, offset, inode;
  char permissions[8], device[8], filename[MAXPATHLEN];
  int read, write, exec;
  int ret;

  /* Compose the filename for the /proc memory map, and open it.  */
  sprintf (mapsfilename, "/proc/%lld/maps", pid);
  if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
    error (_("Could not open %s."), mapsfilename);

  if (info_verbose)
    fprintf_filtered (gdb_stdout,
		      "Reading memory regions from %s\n", mapsfilename);

  /* Now iterate until end-of-file.  */
  while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
		       &offset, &device[0], &inode, &filename[0]))
    {
      size = endaddr - addr;

      /* Get the segment's permissions from the "rwxp"-style field.  */
      read = (strchr (permissions, 'r') != 0);
      write = (strchr (permissions, 'w') != 0);
      exec = (strchr (permissions, 'x') != 0);

      if (info_verbose)
	{
	  fprintf_filtered (gdb_stdout,
			    "Save segment, %lld bytes at 0x%s (%c%c%c)",
			    size, paddr_nz (addr),
			    read ? 'r' : ' ',
			    write ? 'w' : ' ', exec ? 'x' : ' ');
	  if (filename[0])
	    fprintf_filtered (gdb_stdout, " for %s", filename);
	  fprintf_filtered (gdb_stdout, "\n");
	}

      /* Invoke the callback function to create the corefile
	 segment.  */
      func (addr, size, read, write, exec, obfd);
    }
  fclose (mapsfile);
  return 0;
}
2570
/* Records the thread's register state for the corefile note
   section.  Fetches PTID's registers and appends prstatus (general
   registers) and prfpreg (FP registers) notes — plus an xfp note
   where FILL_FPXREGSET is defined — to NOTE_DATA, returning the new
   note buffer.  For each register class, a gdbarch-provided regset
   collector is preferred; the traditional fill_*regset routines are
   the fallback.  */

static char *
linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
			       char *note_data, int *note_size)
{
  gdb_gregset_t gregs;
  gdb_fpregset_t fpregs;
#ifdef FILL_FPXREGSET
  gdb_fpxregset_t fpxregs;
#endif
  unsigned long lwp = ptid_get_lwp (ptid);
  struct regcache *regcache = get_thread_regcache (ptid);
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  const struct regset *regset;
  int core_regset_p;
  struct cleanup *old_chain;

  /* Temporarily switch inferior_ptid so the fetch targets PTID.  */
  old_chain = save_inferior_ptid ();
  inferior_ptid = ptid;
  target_fetch_registers (regcache, -1);
  do_cleanups (old_chain);

  core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
  /* General registers: use the arch's ".reg" regset when available.  */
  if (core_regset_p
      && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
						     sizeof (gregs))) != NULL
      && regset->collect_regset != NULL)
    regset->collect_regset (regset, regcache, -1,
			    &gregs, sizeof (gregs));
  else
    fill_gregset (regcache, &gregs, -1);

  note_data = (char *) elfcore_write_prstatus (obfd,
					       note_data,
					       note_size,
					       lwp,
					       stop_signal, &gregs);

  /* Floating-point registers: same pattern with ".reg2".  */
  if (core_regset_p
      && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
						     sizeof (fpregs))) != NULL
      && regset->collect_regset != NULL)
    regset->collect_regset (regset, regcache, -1,
			    &fpregs, sizeof (fpregs));
  else
    fill_fpregset (regcache, &fpregs, -1);

  note_data = (char *) elfcore_write_prfpreg (obfd,
					      note_data,
					      note_size,
					      &fpregs, sizeof (fpregs));

#ifdef FILL_FPXREGSET
  /* Extended FP registers (e.g. SSE state), where supported.  */
  if (core_regset_p
      && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg-xfp",
						     sizeof (fpxregs))) != NULL
      && regset->collect_regset != NULL)
    regset->collect_regset (regset, regcache, -1,
			    &fpxregs, sizeof (fpxregs));
  else
    fill_fpxregset (regcache, &fpxregs, -1);

  note_data = (char *) elfcore_write_prxfpreg (obfd,
					       note_data,
					       note_size,
					       &fpxregs, sizeof (fpxregs));
#endif
  return note_data;
}
2642
/* Accumulator passed to linux_nat_corefile_thread_callback while
   building the corefile note section.  */

struct linux_nat_corefile_thread_data
{
  bfd *obfd;		/* Output BFD the notes are written for.  */
  char *note_data;	/* Note buffer built up so far.  */
  int *note_size;	/* Running size of the note buffer.  */
  int num_notes;	/* Number of per-thread notes emitted.  */
};
2650
2651 /* Called by gdbthread.c once per thread. Records the thread's
2652 register state for the corefile note section. */
2653
2654 static int
2655 linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
2656 {
2657 struct linux_nat_corefile_thread_data *args = data;
2658
2659 args->note_data = linux_nat_do_thread_registers (args->obfd,
2660 ti->ptid,
2661 args->note_data,
2662 args->note_size);
2663 args->num_notes++;
2664
2665 return 0;
2666 }
2667
2668 /* Records the register state for the corefile note section. */
2669
2670 static char *
2671 linux_nat_do_registers (bfd *obfd, ptid_t ptid,
2672 char *note_data, int *note_size)
2673 {
2674 return linux_nat_do_thread_registers (obfd,
2675 ptid_build (ptid_get_pid (inferior_ptid),
2676 ptid_get_pid (inferior_ptid),
2677 0),
2678 note_data, note_size);
2679 }
2680
2681 /* Fills the "to_make_corefile_note" target vector. Builds the note
2682 section for a corefile, and returns it in a malloc buffer. */
2683
2684 static char *
2685 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
2686 {
2687 struct linux_nat_corefile_thread_data thread_args;
2688 struct cleanup *old_chain;
2689 char fname[16] = { '\0' };
2690 char psargs[80] = { '\0' };
2691 char *note_data = NULL;
2692 ptid_t current_ptid = inferior_ptid;
2693 gdb_byte *auxv;
2694 int auxv_len;
2695
2696 if (get_exec_file (0))
2697 {
2698 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
2699 strncpy (psargs, get_exec_file (0), sizeof (psargs));
2700 if (get_inferior_args ())
2701 {
2702 strncat (psargs, " ", sizeof (psargs) - strlen (psargs));
2703 strncat (psargs, get_inferior_args (),
2704 sizeof (psargs) - strlen (psargs));
2705 }
2706 note_data = (char *) elfcore_write_prpsinfo (obfd,
2707 note_data,
2708 note_size, fname, psargs);
2709 }
2710
2711 /* Dump information for threads. */
2712 thread_args.obfd = obfd;
2713 thread_args.note_data = note_data;
2714 thread_args.note_size = note_size;
2715 thread_args.num_notes = 0;
2716 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
2717 if (thread_args.num_notes == 0)
2718 {
2719 /* iterate_over_threads didn't come up with any threads; just
2720 use inferior_ptid. */
2721 note_data = linux_nat_do_registers (obfd, inferior_ptid,
2722 note_data, note_size);
2723 }
2724 else
2725 {
2726 note_data = thread_args.note_data;
2727 }
2728
2729 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
2730 NULL, &auxv);
2731 if (auxv_len > 0)
2732 {
2733 note_data = elfcore_write_note (obfd, note_data, note_size,
2734 "CORE", NT_AUXV, auxv, auxv_len);
2735 xfree (auxv);
2736 }
2737
2738 make_cleanup (xfree, note_data);
2739 return note_data;
2740 }
2741
/* Implement the "info proc" command.  ARGS may contain a PID (default
   is the current inferior) and any of the keywords "mappings",
   "status", "stat", "cmd", "exe", "cwd", or "all" selecting which
   /proc/PID files to display; cmdline, cwd and exe are shown by
   default.  */

static void
linux_nat_info_proc_cmd (char *args, int from_tty)
{
  /* PID of the process to examine; overridden by a numeric argument.  */
  long long pid = PIDGET (inferior_ptid);
  FILE *procfile;
  char **argv = NULL;
  char buffer[MAXPATHLEN];
  char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
  /* Section flags: cmdline/cwd/exe default on, the rest opt-in.  */
  int cmdline_f = 1;
  int cwd_f = 1;
  int exe_f = 1;
  int mappings_f = 0;
  int environ_f = 0;
  int status_f = 0;
  int stat_f = 0;
  int all = 0;
  struct stat dummy;

  if (args)
    {
      /* Break up 'args' into an argv array.  */
      if ((argv = buildargv (args)) == NULL)
	nomem (0);
      else
	make_cleanup_freeargv (argv);
    }
  /* Scan the arguments for a PID and section keywords.  Keywords
     checked with strncmp accept unambiguous abbreviations.  */
  while (argv != NULL && *argv != NULL)
    {
      if (isdigit (argv[0][0]))
	{
	  pid = strtoul (argv[0], NULL, 10);
	}
      else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
	{
	  mappings_f = 1;
	}
      else if (strcmp (argv[0], "status") == 0)
	{
	  status_f = 1;
	}
      else if (strcmp (argv[0], "stat") == 0)
	{
	  stat_f = 1;
	}
      else if (strcmp (argv[0], "cmd") == 0)
	{
	  cmdline_f = 1;
	}
      else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
	{
	  exe_f = 1;
	}
      else if (strcmp (argv[0], "cwd") == 0)
	{
	  cwd_f = 1;
	}
      else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
	{
	  all = 1;
	}
      else
	{
	  /* [...] (future options here) */
	}
      argv++;
    }
  if (pid == 0)
    error (_("No current process: you must name one."));

  /* Sanity check: make sure /proc exists for this PID.  */
  sprintf (fname1, "/proc/%lld", pid);
  if (stat (fname1, &dummy) != 0)
    error (_("No /proc directory: '%s'"), fname1);

  printf_filtered (_("process %lld\n"), pid);
  if (cmdline_f || all)
    {
      sprintf (fname1, "/proc/%lld/cmdline", pid);
      if ((procfile = fopen (fname1, "r")) != NULL)
	{
	  fgets (buffer, sizeof (buffer), procfile);
	  printf_filtered ("cmdline = '%s'\n", buffer);
	  fclose (procfile);
	}
      else
	warning (_("unable to open /proc file '%s'"), fname1);
    }
  if (cwd_f || all)
    {
      sprintf (fname1, "/proc/%lld/cwd", pid);
      memset (fname2, 0, sizeof (fname2));
      if (readlink (fname1, fname2, sizeof (fname2)) > 0)
	printf_filtered ("cwd = '%s'\n", fname2);
      else
	warning (_("unable to read link '%s'"), fname1);
    }
  if (exe_f || all)
    {
      sprintf (fname1, "/proc/%lld/exe", pid);
      memset (fname2, 0, sizeof (fname2));
      if (readlink (fname1, fname2, sizeof (fname2)) > 0)
	printf_filtered ("exe = '%s'\n", fname2);
      else
	warning (_("unable to read link '%s'"), fname1);
    }
  if (mappings_f || all)
    {
      sprintf (fname1, "/proc/%lld/maps", pid);
      if ((procfile = fopen (fname1, "r")) != NULL)
	{
	  long long addr, endaddr, size, offset, inode;
	  char permissions[8], device[8], filename[MAXPATHLEN];

	  printf_filtered (_("Mapped address spaces:\n\n"));
	  /* Column widths depend on the target's address size.  */
	  if (gdbarch_addr_bit (current_gdbarch) == 32)
	    {
	      printf_filtered ("\t%10s %10s %10s %10s %7s\n",
			       "Start Addr",
			       "  End Addr",
			       "      Size", "    Offset", "objfile");
	    }
	  else
	    {
	      printf_filtered ("  %18s %18s %10s %10s %7s\n",
			       "Start Addr",
			       "  End Addr",
			       "      Size", "    Offset", "objfile");
	    }

	  while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
			       &offset, &device[0], &inode, &filename[0]))
	    {
	      size = endaddr - addr;

	      /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
		 calls here (and possibly above) should be abstracted
		 out into their own functions?  Andrew suggests using
		 a generic local_address_string instead to print out
		 the addresses; that makes sense to me, too.  */

	      if (gdbarch_addr_bit (current_gdbarch) == 32)
		{
		  printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
				   (unsigned long) addr,	/* FIXME: pr_addr */
				   (unsigned long) endaddr,
				   (int) size,
				   (unsigned int) offset,
				   filename[0] ? filename : "");
		}
	      else
		{
		  printf_filtered ("  %#18lx %#18lx %#10x %#10x %7s\n",
				   (unsigned long) addr,	/* FIXME: pr_addr */
				   (unsigned long) endaddr,
				   (int) size,
				   (unsigned int) offset,
				   filename[0] ? filename : "");
		}
	    }

	  fclose (procfile);
	}
      else
	warning (_("unable to open /proc file '%s'"), fname1);
    }
  if (status_f || all)
    {
      /* /proc/PID/status is human-readable; just echo it.  */
      sprintf (fname1, "/proc/%lld/status", pid);
      if ((procfile = fopen (fname1, "r")) != NULL)
	{
	  while (fgets (buffer, sizeof (buffer), procfile) != NULL)
	    puts_filtered (buffer);
	  fclose (procfile);
	}
      else
	warning (_("unable to open /proc file '%s'"), fname1);
    }
  if (stat_f || all)
    {
      /* /proc/PID/stat is a single line of space-separated fields;
	 scan them in order, labelling each.  The fscanf sequence must
	 match the kernel's field order exactly.  */
      sprintf (fname1, "/proc/%lld/stat", pid);
      if ((procfile = fopen (fname1, "r")) != NULL)
	{
	  int itmp;
	  char ctmp;
	  long ltmp;

	  if (fscanf (procfile, "%d ", &itmp) > 0)
	    printf_filtered (_("Process: %d\n"), itmp);
	  if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
	    printf_filtered (_("Exec file: %s\n"), buffer);
	  if (fscanf (procfile, "%c ", &ctmp) > 0)
	    printf_filtered (_("State: %c\n"), ctmp);
	  if (fscanf (procfile, "%d ", &itmp) > 0)
	    printf_filtered (_("Parent process: %d\n"), itmp);
	  if (fscanf (procfile, "%d ", &itmp) > 0)
	    printf_filtered (_("Process group: %d\n"), itmp);
	  if (fscanf (procfile, "%d ", &itmp) > 0)
	    printf_filtered (_("Session id: %d\n"), itmp);
	  if (fscanf (procfile, "%d ", &itmp) > 0)
	    printf_filtered (_("TTY: %d\n"), itmp);
	  if (fscanf (procfile, "%d ", &itmp) > 0)
	    printf_filtered (_("TTY owner process group: %d\n"), itmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Flags: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Minor faults (no memory page): %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Minor faults, children: %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Major faults (memory page faults): %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Major faults, children: %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("utime: %ld\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("stime: %ld\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("utime, children: %ld\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("stime, children: %ld\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
			     ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("'nice' value: %ld\n"), ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("jiffies until next timeout: %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("start time (jiffies since system boot): %ld\n"),
			     ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Virtual memory size: %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("End of text: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
#if 0	/* Don't know how architecture-dependent the rest is...
	   Anyway the signal bitmap info is available from "status".  */
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)	/* FIXME arch? */
	    printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)	/* FIXME arch? */
	    printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)	/* FIXME arch? */
	    printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
#endif
	  fclose (procfile);
	}
      else
	warning (_("unable to open /proc file '%s'"), fname1);
    }
}
3017
/* Implement the to_xfer_partial interface for memory reads using the /proc
   filesystem.  Because we can use a single read() call for /proc, this
   can be much more efficient than banging away at PTRACE_PEEKTEXT,
   but it doesn't support writes.  Returns the number of bytes read,
   or 0 to let the caller fall back to another transfer method.  */

static LONGEST
linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
			 const char *annex, gdb_byte *readbuf,
			 const gdb_byte *writebuf,
			 ULONGEST offset, LONGEST len)
{
  LONGEST ret;
  int fd;
  char filename[64];

  /* This path only handles memory reads; decline everything else.  */
  if (object != TARGET_OBJECT_MEMORY || !readbuf)
    return 0;

  /* Don't bother for one word; the open/read/close overhead would
     outweigh a single ptrace peek.  */
  if (len < 3 * sizeof (long))
    return 0;

  /* We could keep this file open and cache it - possibly one per
     thread.  That requires some juggling, but is even faster.  */
  sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return 0;

  /* If pread64 is available, use it.  It's faster if the kernel
     supports it (only one syscall), and it's 64-bit safe even on
     32-bit platforms (for instance, SPARC debugging a SPARC64
     application).  */
#ifdef HAVE_PREAD64
  if (pread64 (fd, readbuf, len, offset) != len)
#else
  if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
#endif
    /* Treat short reads as complete failure; the caller will retry
       via the slower path.  */
    ret = 0;
  else
    ret = len;

  close (fd);
  return ret;
}
3063
3064 /* Parse LINE as a signal set and add its set bits to SIGS. */
3065
3066 static void
3067 add_line_to_sigset (const char *line, sigset_t *sigs)
3068 {
3069 int len = strlen (line) - 1;
3070 const char *p;
3071 int signum;
3072
3073 if (line[len] != '\n')
3074 error (_("Could not parse signal set: %s"), line);
3075
3076 p = line;
3077 signum = len * 4;
3078 while (len-- > 0)
3079 {
3080 int digit;
3081
3082 if (*p >= '0' && *p <= '9')
3083 digit = *p - '0';
3084 else if (*p >= 'a' && *p <= 'f')
3085 digit = *p - 'a' + 10;
3086 else
3087 error (_("Could not parse signal set: %s"), line);
3088
3089 signum -= 4;
3090
3091 if (digit & 1)
3092 sigaddset (sigs, signum + 1);
3093 if (digit & 2)
3094 sigaddset (sigs, signum + 2);
3095 if (digit & 4)
3096 sigaddset (sigs, signum + 3);
3097 if (digit & 8)
3098 sigaddset (sigs, signum + 4);
3099
3100 p++;
3101 }
3102 }
3103
3104 /* Find process PID's pending signals from /proc/pid/status and set
3105 SIGS to match. */
3106
3107 void
3108 linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
3109 {
3110 FILE *procfile;
3111 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
3112 int signum;
3113
3114 sigemptyset (pending);
3115 sigemptyset (blocked);
3116 sigemptyset (ignored);
3117 sprintf (fname, "/proc/%d/status", pid);
3118 procfile = fopen (fname, "r");
3119 if (procfile == NULL)
3120 error (_("Could not open %s"), fname);
3121
3122 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
3123 {
3124 /* Normal queued signals are on the SigPnd line in the status
3125 file. However, 2.6 kernels also have a "shared" pending
3126 queue for delivering signals to a thread group, so check for
3127 a ShdPnd line also.
3128
3129 Unfortunately some Red Hat kernels include the shared pending
3130 queue but not the ShdPnd status field. */
3131
3132 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
3133 add_line_to_sigset (buffer + 8, pending);
3134 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
3135 add_line_to_sigset (buffer + 8, pending);
3136 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
3137 add_line_to_sigset (buffer + 8, blocked);
3138 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
3139 add_line_to_sigset (buffer + 8, ignored);
3140 }
3141
3142 fclose (procfile);
3143 }
3144
3145 static LONGEST
3146 linux_xfer_partial (struct target_ops *ops, enum target_object object,
3147 const char *annex, gdb_byte *readbuf,
3148 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3149 {
3150 LONGEST xfer;
3151
3152 if (object == TARGET_OBJECT_AUXV)
3153 return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
3154 offset, len);
3155
3156 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
3157 offset, len);
3158 if (xfer != 0)
3159 return xfer;
3160
3161 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
3162 offset, len);
3163 }
3164
/* Create a prototype generic Linux target.  The client can override
   it with local methods.  Installs the Linux-specific child-process
   hooks and corefile support on T, and interposes linux_xfer_partial
   in front of T's original transfer method.  */

static void
linux_target_install_ops (struct target_ops *t)
{
  t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
  t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
  t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
  t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
  t->to_post_startup_inferior = linux_child_post_startup_inferior;
  t->to_post_attach = linux_child_post_attach;
  t->to_follow_fork = linux_child_follow_fork;
  t->to_find_memory_regions = linux_nat_find_memory_regions;
  t->to_make_corefile_notes = linux_nat_make_corefile_notes;

  /* Save the inherited transfer method BEFORE overriding it, so
     linux_xfer_partial can chain to it as a fallback.  */
  super_xfer_partial = t->to_xfer_partial;
  t->to_xfer_partial = linux_xfer_partial;
}
3184
/* Return a fresh ptrace-based target vector with the generic Linux
   methods installed on top.  */

struct target_ops *
linux_target (void)
{
  struct target_ops *ops;

  ops = inf_ptrace_target ();
  linux_target_install_ops (ops);

  return ops;
}
3195
3196 struct target_ops *
3197 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
3198 {
3199 struct target_ops *t;
3200
3201 t = inf_ptrace_trad_target (register_u_offset);
3202 linux_target_install_ops (t);
3203
3204 return t;
3205 }
3206
3207 void
3208 linux_nat_add_target (struct target_ops *t)
3209 {
3210 /* Save the provided single-threaded target. We save this in a separate
3211 variable because another target we've inherited from (e.g. inf-ptrace)
3212 may have saved a pointer to T; we want to use it for the final
3213 process stratum target. */
3214 linux_ops_saved = *t;
3215 linux_ops = &linux_ops_saved;
3216
3217 /* Override some methods for multithreading. */
3218 t->to_attach = linux_nat_attach;
3219 t->to_detach = linux_nat_detach;
3220 t->to_resume = linux_nat_resume;
3221 t->to_wait = linux_nat_wait;
3222 t->to_xfer_partial = linux_nat_xfer_partial;
3223 t->to_kill = linux_nat_kill;
3224 t->to_mourn_inferior = linux_nat_mourn_inferior;
3225 t->to_thread_alive = linux_nat_thread_alive;
3226 t->to_pid_to_str = linux_nat_pid_to_str;
3227 t->to_has_thread_control = tc_schedlock;
3228
3229 /* We don't change the stratum; this target will sit at
3230 process_stratum and thread_db will set at thread_stratum. This
3231 is a little strange, since this is a multi-threaded-capable
3232 target, but we want to be on the stack below thread_db, and we
3233 also want to be used for single-threaded processes. */
3234
3235 add_target (t);
3236
3237 /* TODO: Eliminate this and have libthread_db use
3238 find_target_beneath. */
3239 thread_db_init (t);
3240 }
3241
/* Module initializer: register commands, capture the baseline signal
   masks, and install the SIGCHLD handler used to notice inferior
   events.  Called automatically by GDB's init machinery.  */

void
_initialize_linux_nat (void)
{
  struct sigaction action;

  /* "info proc" -- dump /proc/<pid> information for the inferior (or
     any explicitly-given pid).  */
  add_info ("proc", linux_nat_info_proc_cmd, _("\
Show /proc process information about any running process.\n\
Specify any process id, or use the program being debugged by default.\n\
Specify any of the following keywords for detailed info:\n\
  mappings -- list of mapped memory regions.\n\
  stat     -- list a bunch of random process info.\n\
  status   -- list a different bunch of random process info.\n\
  all      -- list all available /proc info."));

  /* Save the original signal mask.  Captured BEFORE we install any
     handler or change blocking below, so it reflects the state GDB
     started with.  */
  sigprocmask (SIG_SETMASK, NULL, &normal_mask);

  /* Install our SIGCHLD handler; SA_RESTART keeps interrupted
     syscalls from failing with EINTR.  */
  action.sa_handler = sigchld_handler;
  sigemptyset (&action.sa_mask);
  action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  The
     suspend mask starts from the current mask with SIGCHLD removed.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  /* No signals are explicitly blocked yet; thread-library signals
     may be added later (see lin_thread_get_thread_signals).  */
  sigemptyset (&blocked_mask);

  /* "set/show debug lin-lwp" -- toggle this module's debug output.  */
  add_setshow_zinteger_cmd ("lin-lwp", no_class, &debug_linux_nat, _("\
Set debugging of GNU/Linux lwp module."), _("\
Show debugging of GNU/Linux lwp module."), _("\
Enables printf debugging output."),
			    NULL,
			    show_debug_linux_nat,
			    &setdebuglist, &showdebuglist);
}
3278 \f
3279
3280 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
3281 the GNU/Linux Threads library and therefore doesn't really belong
3282 here. */
3283
3284 /* Read variable NAME in the target and return its value if found.
3285 Otherwise return zero. It is assumed that the type of the variable
3286 is `int'. */
3287
3288 static int
3289 get_signo (const char *name)
3290 {
3291 struct minimal_symbol *ms;
3292 int signo;
3293
3294 ms = lookup_minimal_symbol (name, NULL, NULL);
3295 if (ms == NULL)
3296 return 0;
3297
3298 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
3299 sizeof (signo)) != 0)
3300 return 0;
3301
3302 return signo;
3303 }
3304
3305 /* Return the set of signals used by the threads library in *SET. */
3306
3307 void
3308 lin_thread_get_thread_signals (sigset_t *set)
3309 {
3310 struct sigaction action;
3311 int restart, cancel;
3312
3313 sigemptyset (set);
3314
3315 restart = get_signo ("__pthread_sig_restart");
3316 cancel = get_signo ("__pthread_sig_cancel");
3317
3318 /* LinuxThreads normally uses the first two RT signals, but in some legacy
3319 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
3320 not provide any way for the debugger to query the signal numbers -
3321 fortunately they don't change! */
3322
3323 if (restart == 0)
3324 restart = __SIGRTMIN;
3325
3326 if (cancel == 0)
3327 cancel = __SIGRTMIN + 1;
3328
3329 sigaddset (set, restart);
3330 sigaddset (set, cancel);
3331
3332 /* The GNU/Linux Threads library makes terminating threads send a
3333 special "cancel" signal instead of SIGCHLD. Make sure we catch
3334 those (to prevent them from terminating GDB itself, which is
3335 likely to be their default action) and treat them the same way as
3336 SIGCHLD. */
3337
3338 action.sa_handler = sigchld_handler;
3339 sigemptyset (&action.sa_mask);
3340 action.sa_flags = SA_RESTART;
3341 sigaction (cancel, &action, NULL);
3342
3343 /* We block the "cancel" signal throughout this code ... */
3344 sigaddset (&blocked_mask, cancel);
3345 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
3346
3347 /* ... except during a sigsuspend. */
3348 sigdelset (&suspend_mask, cancel);
3349 }
3350