]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/gdbserver/linux-low.c
sim: add helper macros for branch profiling
[thirdparty/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
545587ee 2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
4c38e0a4 3 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
da6d8c04
DJ
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
a9762ec7 9 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
a9762ec7 18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
19
20#include "server.h"
58caa3dc 21#include "linux-low.h"
da6d8c04 22
58caa3dc 23#include <sys/wait.h>
da6d8c04
DJ
24#include <stdio.h>
25#include <sys/param.h>
da6d8c04 26#include <sys/ptrace.h>
da6d8c04
DJ
27#include <signal.h>
28#include <sys/ioctl.h>
29#include <fcntl.h>
d07c63e7 30#include <string.h>
0a30fbc4
DJ
31#include <stdlib.h>
32#include <unistd.h>
fa6a77dc 33#include <errno.h>
fd500816 34#include <sys/syscall.h>
f9387fc3 35#include <sched.h>
07e059b5
VP
36#include <ctype.h>
37#include <pwd.h>
38#include <sys/types.h>
39#include <dirent.h>
efcbbd14
UW
40#include <sys/stat.h>
41#include <sys/vfs.h>
1570b33e 42#include <sys/uio.h>
957f3f49
DE
43#ifndef ELFMAG0
44/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
45 then ELFMAG0 will have been defined. If it didn't get included by
46 gdb_proc_service.h then including it will likely introduce a duplicate
47 definition of elf_fpregset_t. */
48#include <elf.h>
49#endif
efcbbd14
UW
50
51#ifndef SPUFS_MAGIC
52#define SPUFS_MAGIC 0x23c9b64e
53#endif
da6d8c04 54
32ca6d61
DJ
55#ifndef PTRACE_GETSIGINFO
56# define PTRACE_GETSIGINFO 0x4202
57# define PTRACE_SETSIGINFO 0x4203
58#endif
59
fd462a61
DJ
60#ifndef O_LARGEFILE
61#define O_LARGEFILE 0
62#endif
63
24a09b5f
DJ
64/* If the system headers did not provide the constants, hard-code the normal
65 values. */
66#ifndef PTRACE_EVENT_FORK
67
68#define PTRACE_SETOPTIONS 0x4200
69#define PTRACE_GETEVENTMSG 0x4201
70
71/* options set using PTRACE_SETOPTIONS */
72#define PTRACE_O_TRACESYSGOOD 0x00000001
73#define PTRACE_O_TRACEFORK 0x00000002
74#define PTRACE_O_TRACEVFORK 0x00000004
75#define PTRACE_O_TRACECLONE 0x00000008
76#define PTRACE_O_TRACEEXEC 0x00000010
77#define PTRACE_O_TRACEVFORKDONE 0x00000020
78#define PTRACE_O_TRACEEXIT 0x00000040
79
80/* Wait extended result codes for the above trace options. */
81#define PTRACE_EVENT_FORK 1
82#define PTRACE_EVENT_VFORK 2
83#define PTRACE_EVENT_CLONE 3
84#define PTRACE_EVENT_EXEC 4
85#define PTRACE_EVENT_VFORK_DONE 5
86#define PTRACE_EVENT_EXIT 6
87
88#endif /* PTRACE_EVENT_FORK */
89
90/* We can't always assume that this flag is available, but all systems
91 with the ptrace event handlers also have __WALL, so it's safe to use
92 in some contexts. */
93#ifndef __WALL
94#define __WALL 0x40000000 /* Wait for any child. */
95#endif
96
ec8ebe72
DE
97#ifndef W_STOPCODE
98#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
99#endif
100
42c81e2a
DJ
101#ifdef __UCLIBC__
102#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
103#define HAS_NOMMU
104#endif
105#endif
106
24a09b5f
DJ
107/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
108 representation of the thread ID.
611cb4a5 109
54a0b537 110 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
95954743
PA
111 the same as the LWP ID.
112
113 ``all_processes'' is keyed by the "overall process ID", which
114 GNU/Linux calls tgid, "thread group ID". */
0d62e5e8 115
54a0b537 116struct inferior_list all_lwps;
0d62e5e8 117
24a09b5f
DJ
118/* A list of all unknown processes which receive stop signals. Some other
119 process will presumably claim each of these as forked children
120 momentarily. */
121
122struct inferior_list stopped_pids;
123
0d62e5e8
DJ
124/* FIXME this is a bit of a hack, and could be removed. */
125int stopping_threads;
126
127/* FIXME make into a target method? */
24a09b5f 128int using_threads = 1;
24a09b5f 129
95954743
PA
130/* This flag is true iff we've just created or attached to our first
131 inferior but it has not stopped yet. As soon as it does, we need
132 to call the low target's arch_setup callback. Doing this only on
133 the first inferior avoids reinializing the architecture on every
134 inferior, and avoids messing with the register caches of the
135 already running inferiors. NOTE: this assumes all inferiors under
136 control of gdbserver have the same architecture. */
d61ddec4
UW
137static int new_inferior;
138
2acc282a 139static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 140 int step, int signal, siginfo_t *info);
2bd7c093 141static void linux_resume (struct thread_resume *resume_info, size_t n);
54a0b537 142static void stop_all_lwps (void);
95954743 143static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
95954743 144static void *add_lwp (ptid_t ptid);
c35fafde 145static int linux_stopped_by_watchpoint (void);
95954743 146static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
dc146f7c 147static int linux_core_of_thread (ptid_t ptid);
d50171e4
PA
148static void proceed_all_lwps (void);
149static void unstop_all_lwps (struct lwp_info *except);
d50171e4
PA
150static int finish_step_over (struct lwp_info *lwp);
151static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
152static int kill_lwp (unsigned long lwpid, int signo);
153
154/* True if the low target can hardware single-step. Such targets
155 don't need a BREAKPOINT_REINSERT_ADDR callback. */
156
157static int
158can_hardware_single_step (void)
159{
160 return (the_low_target.breakpoint_reinsert_addr == NULL);
161}
162
163/* True if the low target supports memory breakpoints. If so, we'll
164 have a GET_PC implementation. */
165
166static int
167supports_breakpoints (void)
168{
169 return (the_low_target.get_pc != NULL);
170}
0d62e5e8
DJ
171
/* A signal recorded as pending for an LWP, kept as a singly-linked
   stack chained through PREV.  */

struct pending_signals
{
  int signal;			/* Signal number.  */
  siginfo_t info;		/* Associated siginfo for the signal.  */
  struct pending_signals *prev;	/* Next older pending signal, or NULL.  */
};
611cb4a5 178
14ce3065
DE
179#define PTRACE_ARG3_TYPE void *
180#define PTRACE_ARG4_TYPE void *
c6ecbae5 181#define PTRACE_XFER_TYPE long
da6d8c04 182
58caa3dc 183#ifdef HAVE_LINUX_REGSETS
52fa2412
UW
184static char *disabled_regsets;
185static int num_regsets;
58caa3dc
DJ
186#endif
187
bd99dc85
PA
188/* The read/write ends of the pipe registered as waitable file in the
189 event loop. */
190static int linux_event_pipe[2] = { -1, -1 };
191
192/* True if we're currently in async mode. */
193#define target_is_async_p() (linux_event_pipe[0] != -1)
194
195static void send_sigstop (struct inferior_list_entry *entry);
196static void wait_for_sigstop (struct inferior_list_entry *entry);
197
d0722149
DE
/* Accepts an integer PID; Returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd, caller must free.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;
  ssize_t len;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  snprintf (name1, MAXPATHLEN, "/proc/%d/exe", pid);

  /* readlink does not NUL-terminate its result; leave one byte of
     slack and terminate explicitly.  The old code read MAXPATHLEN
     bytes, so a maximally long link left NAME2 unterminated despite
     the memset above.  */
  len = readlink (name1, name2, MAXPATHLEN - 1);
  if (len > 0)
    {
      name2[len] = '\0';
      free (name1);
      return name2;
    }
  else
    {
      /* The /proc link could not be read (e.g. process gone); return
	 the /proc path itself so the caller still gets a malloc'd
	 string, as before.  */
      free (name2);
      return name1;
    }
}
223
/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  /* Verify the four ELF magic bytes, then check the class byte.  */
  static const unsigned char elfmag[4] = { ELFMAG0, ELFMAG1, ELFMAG2, ELFMAG3 };
  int i;

  for (i = 0; i < 4; i++)
    if (header->e_ident[EI_MAG0 + i] != elfmag[i])
      return 0;

  return header->e_ident[EI_CLASS] == ELFCLASS64;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd, result;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  /* A file too short to hold a full ELF header cannot be 64-bit ELF.  */
  if (read (fd, &header, sizeof (header)) == sizeof (header))
    result = elf_64_header_p (&header);
  else
    result = 0;

  close (fd);
  return result;
}
259
bd99dc85
PA
/* Remove LWP from the thread and LWP lists and release its storage,
   including the architecture-specific private data.  */

static void
delete_lwp (struct lwp_info *lwp)
{
  /* Drop the corresponding thread first, then unlink the LWP itself
     from the global list.  */
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}
268
95954743
PA
269/* Add a process to the common process list, and set its private
270 data. */
271
272static struct process_info *
273linux_add_process (int pid, int attached)
274{
275 struct process_info *proc;
276
277 /* Is this the first process? If so, then set the arch. */
278 if (all_processes.head == NULL)
279 new_inferior = 1;
280
281 proc = add_process (pid, attached);
282 proc->private = xcalloc (1, sizeof (*proc->private));
283
aa5ca48f
DE
284 if (the_low_target.new_process != NULL)
285 proc->private->arch_private = the_low_target.new_process ();
286
95954743
PA
287 return proc;
288}
289
07d4f67e
DE
/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.

   Returns the waitpid result; errno is preserved from the underlying
   waitpid call.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      /* Emulate __WALL by alternating between plain and __WCLONE
	 waits, using WNOHANG so neither flavor blocks.  */
      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
	 LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
	{
	  /* Since all signals are blocked, there's no need to check
	     for EINTR here.  */
	  ret = waitpid (pid, status, flags);
	  out_errno = errno;

	  /* Stop on a real error (other than "no such children" for
	     this flavor) or on an actual event.  */
	  if (ret == -1 && out_errno != ECHILD)
	    break;
	  else if (ret > 0)
	    break;

	  if (flags & __WCLONE)
	    {
	      /* We've tried both flavors now.  If WNOHANG is set,
		 there's nothing else to do, just bail out.  */
	      if (wnohang)
		break;

	      if (debug_threads)
		fprintf (stderr, "blocking\n");

	      /* Block waiting for signals.  */
	      sigsuspend (&wake_mask);
	    }

	  /* Switch between the clone and non-clone wait flavors.  */
	  flags ^= __WCLONE;
	}

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      /* No __WALL emulation needed; just retry on EINTR.  */
      do
	ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
	     pid, flags, status ? *status : -1, ret);

  /* Restore the errno from the final waitpid for the caller.  */
  errno = out_errno;
  return ret;
}
364
bd99dc85
PA
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  /* The extended event code lives in bits 16+ of the wait status.  */
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      /* Retrieve the new thread's LWP id from the event message.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      /* Propagate clone tracing to the new thread so we hear about
	 its own clones too.  */
      ptrace (PTRACE_SETOPTIONS, new_pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  /* The SIGSTOP is still to come; remember to swallow it.  */
	  new_lwp->stop_expected = 1;

	  if (stopping_threads)
	    {
	      /* Record the signal so it is reported once we unstop.  */
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
443
d50171e4
PA
444/* Return the PC as read from the regcache of LWP, without any
445 adjustment. */
446
447static CORE_ADDR
448get_pc (struct lwp_info *lwp)
449{
450 struct thread_info *saved_inferior;
451 struct regcache *regcache;
452 CORE_ADDR pc;
453
454 if (the_low_target.get_pc == NULL)
455 return 0;
456
457 saved_inferior = current_inferior;
458 current_inferior = get_lwp_thread (lwp);
459
460 regcache = get_thread_regcache (current_inferior, 1);
461 pc = (*the_low_target.get_pc) (regcache);
462
463 if (debug_threads)
464 fprintf (stderr, "pc is 0x%lx\n", (long) pc);
465
466 current_inferior = saved_inferior;
467 return pc;
468}
469
/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  /* Only back up over the breakpoint for a plain SIGTRAP: not when
     single-stepping, not for a watchpoint hit, and not for an
     extended ptrace event (high bits of the status set).  */
  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
ce3a066d 513
0d62e5e8 514static void *
95954743 515add_lwp (ptid_t ptid)
611cb4a5 516{
54a0b537 517 struct lwp_info *lwp;
0d62e5e8 518
54a0b537
PA
519 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
520 memset (lwp, 0, sizeof (*lwp));
0d62e5e8 521
95954743 522 lwp->head.id = ptid;
0d62e5e8 523
aa5ca48f
DE
524 if (the_low_target.new_thread != NULL)
525 lwp->arch_private = the_low_target.new_thread ();
526
54a0b537 527 add_inferior_to_list (&all_lwps, &lwp->head);
0d62e5e8 528
54a0b537 529 return lwp;
0d62e5e8 530}
611cb4a5 531
da6d8c04
DJ
/* Start an inferior process and returns its pid.
   ALLARGS is a vector of program-name and args. */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  /* On no-MMU uClibc systems fork is unavailable; vfork is used
     instead.  */
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      /* Child: request tracing before exec'ing the program.  */
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      /* Put the inferior in its own process group.  */
      setpgid (0, 0);

      /* Try an exact path first, then fall back to a PATH search if
	 the file was not found.  */
      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  /* Parent: register the new process and its initial LWP/thread.  */
  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  /* Ptrace options still need to be set once the child stops.  */
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
579
/* Attach to an inferior process.  */

/* Attach to the LWP identified by LWPID.  INITIAL non-zero means this
   is the first LWP of a process we are attaching to (attach failure is
   then a hard error); zero means it is an additional thread of an
   already-attached process (attach failure is only a warning).  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
	{
	  /* If we fail to attach to an LWP, just warn.  */
	  fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
		   strerror (errno), errno);
	  fflush (stderr);
	  return;
	}
      else
	/* If we fail to attach to a process, report an error.  */
	error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
	       strerror (errno), errno);
    }

  if (initial)
    /* NOTE/FIXME: This lwp might have not been the tgid.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
	 safe, since we're always called in the context of the same
	 process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.
	??? If the process already has several threads we leave the other
	threads running.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}
658
95954743
PA
/* Public wrapper: attach to an additional LWP of an already-attached
   process (non-initial attach, so failures only warn).  */

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}
664
/* Attach to the process with id PID, registering it and its main LWP.
   Returns 0 on success (attach failures are reported via error from
   linux_attach_lwp_1).  */

int
linux_attach (unsigned long pid)
{
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  return 0;
}
683
/* Bookkeeping for counting threads of one process while iterating over
   all threads (see second_thread_of_pid_p).  */

struct counter
{
  int pid;	/* Process whose threads are counted.  */
  int count;	/* Matching threads seen so far.  */
};
689
690static int
691second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
692{
693 struct counter *counter = args;
694
695 if (ptid_get_pid (entry->id) == counter->pid)
696 {
697 if (++counter->count > 1)
698 return 1;
699 }
d61ddec4 700
da6d8c04
DJ
701 return 0;
702}
703
95954743
PA
704static int
705last_thread_of_process_p (struct thread_info *thread)
706{
707 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
708 int pid = ptid_get_pid (ptid);
709 struct counter counter = { pid , 0 };
da6d8c04 710
95954743
PA
711 return (find_inferior (&all_threads,
712 second_thread_of_pid_p, &counter) == NULL);
713}
714
/* Kill the inferior lwp.  */

/* Iteration callback for linux_kill: kill every LWP of process *ARGS
   except the main (first) one, which is deferred (see below).  Always
   returns 0 so the iteration visits every thread.  */

static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  /* Skip threads of other processes.  */
  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
	fprintf (stderr, "lkop: is last of process %s\n",
		 target_pid_to_str (entry->id));
      return 0;
    }

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  if (!lwp->stopped)
    send_sigstop (&lwp->head);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}
756
95954743
PA
/* Kill process PID and all its LWPs.  Returns 0 on success, -1 if the
   process is unknown.  */

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  struct thread_info *thread;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Kill all LWPs except the main one (see linux_kill_one_lwp for the
     kernel-zombie rationale).  */
  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));
  thread = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
	     lwpid_of (lwp), pid);

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  if (!lwp->stopped)
    send_sigstop (&lwp->head);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (lwpid > 0 && WIFSTOPPED (wstat));

  delete_lwp (lwp);

  the_target->mourn (process);
  return 0;
}
799
95954743
PA
/* Iteration callback for linux_detach: detach from one LWP of process
   *ARGS, first ensuring it is stopped and has no pending SIGSTOP that
   would be delivered after we let go.  Always returns 0 so every
   thread is visited.  */

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If we're detaching from a running inferior, make sure it is
     stopped first, as PTRACE_DETACH will not work otherwise.  */
  if (!lwp->stopped)
    {
      int lwpid = lwpid_of (lwp);

      stopping_threads = 1;
      send_sigstop (&lwp->head);

      /* If this detects a new thread through a clone event, the new
	 thread is appended to the end of the lwp list, so we'll
	 eventually detach from it.  */
      wait_for_sigstop (&lwp->head);
      stopping_threads = 0;

      /* If LWP exits while we're trying to stop it, there's nothing
	 left to do.  Re-look it up by id since the struct may be
	 gone.  */
      lwp = find_lwp_pid (pid_to_ptid (lwpid));
      if (lwp == NULL)
	return 0;
    }

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      if (lwp->stopped)
	linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}
855
dd6953e1 856static int
95954743 857any_thread_of (struct inferior_list_entry *entry, void *args)
6ad8ae5c 858{
95954743
PA
859 int *pid_p = args;
860
861 if (ptid_get_pid (entry->id) == *pid_p)
862 return 1;
863
864 return 0;
865}
866
/* Detach from process PID and all its LWPs.  Returns 0 on success,
   -1 if the process is unknown.  */

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Make some thread of this process current; the per-LWP detach
     below relies on a valid current_inferior.  */
  current_inferior =
    (struct thread_info *) find_inferior (&all_threads, any_thread_of, &pid);

  /* Remove our breakpoints from the inferior's memory before letting
     it go.  */
  delete_all_breakpoints ();
  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);
  return 0;
}
889
8336d594
PA
890static void
891linux_mourn (struct process_info *process)
892{
893 struct process_info_private *priv;
894
895#ifdef USE_THREAD_DB
896 thread_db_mourn (process);
897#endif
898
899 /* Freeing all private data. */
900 priv = process->private;
901 free (priv->arch_private);
902 free (priv);
903 process->private = NULL;
904}
905
444d6139 906static void
95954743 907linux_join (int pid)
444d6139 908{
444d6139 909 int status, ret;
95954743 910 struct process_info *process;
bd99dc85 911
95954743
PA
912 process = find_process_pid (pid);
913 if (process == NULL)
914 return;
444d6139
PA
915
916 do {
95954743 917 ret = my_waitpid (pid, &status, 0);
444d6139
PA
918 if (WIFEXITED (status) || WIFSIGNALED (status))
919 break;
920 } while (ret != -1 || errno != ECHILD);
921}
922
6ad8ae5c 923/* Return nonzero if the given thread is still alive. */
0d62e5e8 924static int
95954743 925linux_thread_alive (ptid_t ptid)
0d62e5e8 926{
95954743
PA
927 struct lwp_info *lwp = find_lwp_pid (ptid);
928
929 /* We assume we always know if a thread exits. If a whole process
930 exited but we still haven't been able to report it to GDB, we'll
931 hold on to the last lwp of the dead process. */
932 if (lwp != NULL)
933 return !lwp->dead;
0d62e5e8
DJ
934 else
935 return 0;
936}
937
6bf5e0ba 938/* Return 1 if this lwp has an interesting status pending. */
611cb4a5 939static int
d50171e4 940status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
0d62e5e8 941{
54a0b537 942 struct lwp_info *lwp = (struct lwp_info *) entry;
95954743 943 ptid_t ptid = * (ptid_t *) arg;
d50171e4 944 struct thread_info *thread = get_lwp_thread (lwp);
95954743
PA
945
946 /* Check if we're only interested in events from a specific process
947 or its lwps. */
948 if (!ptid_equal (minus_one_ptid, ptid)
949 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
950 return 0;
0d62e5e8 951
d50171e4
PA
952 thread = get_lwp_thread (lwp);
953
954 /* If we got a `vCont;t', but we haven't reported a stop yet, do
955 report any status pending the LWP may have. */
8336d594 956 if (thread->last_resume_kind == resume_stop
d50171e4
PA
957 && thread->last_status.kind == TARGET_WAITKIND_STOPPED)
958 return 0;
0d62e5e8 959
d50171e4 960 return lwp->status_pending_p;
0d62e5e8
DJ
961}
962
95954743
PA
963static int
964same_lwp (struct inferior_list_entry *entry, void *data)
965{
966 ptid_t ptid = *(ptid_t *) data;
967 int lwp;
968
969 if (ptid_get_lwp (ptid) != 0)
970 lwp = ptid_get_lwp (ptid);
971 else
972 lwp = ptid_get_pid (ptid);
973
974 if (ptid_get_lwp (entry->id) == lwp)
975 return 1;
976
977 return 0;
978}
979
/* Find the LWP designated by PTID in the global LWP list, or NULL if
   there is none (see same_lwp for how a process-only ptid matches).  */

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}
985
/* Wait with waitpid for one LWP to stop, and return its lwp_info.
   If PTID is minus_one_ptid, wait for any child; otherwise wait for
   the specific lwp encoded in PTID.  The raw waitpid status is
   stored through WSTATP.  OPTIONS is passed on to waitpid (__WALL is
   always added).  Returns NULL when OPTIONS contains WNOHANG and no
   event was pending.  As side effects, records the stop status in
   the LWP, performs one-time arch setup for a new inferior, caches
   watchpoint hit information, and caches the (adjusted) stop PC.  */

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  /* NOTE(review): signals 32/33 are presumably the thread library's
     internal real-time signals, filtered here only to cut debug-log
     noise — confirm against the thread-db support code.  */
  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
	  || (WSTOPSIG (*wstatp) != 32
	      && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      /* Start with all regsets enabled for the new inferior.  */
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
	{
	  child->stopped_by_watchpoint = 0;
	}
      else
	{
	  struct thread_info *saved_inferior;

	  /* The low target's hooks read from the current inferior, so
	     temporarily switch to CHILD's thread.  */
	  saved_inferior = current_inferior;
	  current_inferior = get_lwp_thread (child);

	  child->stopped_by_watchpoint
	    = the_low_target.stopped_by_watchpoint ();

	  if (child->stopped_by_watchpoint)
	    {
	      if (the_low_target.stopped_data_address != NULL)
		child->stopped_data_address
		  = the_low_target.stopped_data_address ();
	      else
		child->stopped_data_address = 0;
	    }

	  current_inferior = saved_inferior;
	}
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}
611cb4a5 1117
219f2f23
PA
1118/* This function should only be called if the LWP got a SIGTRAP.
1119
1120 Handle any tracepoint steps or hits. Return true if a tracepoint
1121 event was handled, 0 otherwise. */
1122
1123static int
1124handle_tracepoints (struct lwp_info *lwp)
1125{
1126 struct thread_info *tinfo = get_lwp_thread (lwp);
1127 int tpoint_related_event = 0;
1128
1129 /* And we need to be sure that any all-threads-stopping doesn't try
1130 to move threads out of the jump pads, as it could deadlock the
1131 inferior (LWP could be in the jump pad, maybe even holding the
1132 lock.) */
1133
1134 /* Do any necessary step collect actions. */
1135 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1136
1137 /* See if we just hit a tracepoint and do its main collect
1138 actions. */
1139 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1140
1141 if (tpoint_related_event)
1142 {
1143 if (debug_threads)
1144 fprintf (stderr, "got a tracepoint event\n");
1145 return 1;
1146 }
1147
1148 return 0;
1149}
1150
d50171e4
PA
1151/* Arrange for a breakpoint to be hit again later. We don't keep the
1152 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1153 will handle the current event, eventually we will resume this LWP,
1154 and this breakpoint will trap again. */
1155
1156static int
1157cancel_breakpoint (struct lwp_info *lwp)
1158{
1159 struct thread_info *saved_inferior;
d50171e4
PA
1160
1161 /* There's nothing to do if we don't support breakpoints. */
1162 if (!supports_breakpoints ())
1163 return 0;
1164
d50171e4
PA
1165 /* breakpoint_at reads from current inferior. */
1166 saved_inferior = current_inferior;
1167 current_inferior = get_lwp_thread (lwp);
1168
1169 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1170 {
1171 if (debug_threads)
1172 fprintf (stderr,
1173 "CB: Push back breakpoint for %s\n",
fc7238bb 1174 target_pid_to_str (ptid_of (lwp)));
d50171e4
PA
1175
1176 /* Back up the PC if necessary. */
1177 if (the_low_target.decr_pc_after_break)
1178 {
1179 struct regcache *regcache
fc7238bb 1180 = get_thread_regcache (current_inferior, 1);
d50171e4
PA
1181 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1182 }
1183
1184 current_inferior = saved_inferior;
1185 return 1;
1186 }
1187 else
1188 {
1189 if (debug_threads)
1190 fprintf (stderr,
1191 "CB: No breakpoint found at %s for [%s]\n",
1192 paddress (lwp->stop_pc),
fc7238bb 1193 target_pid_to_str (ptid_of (lwp)));
d50171e4
PA
1194 }
1195
1196 current_inferior = saved_inferior;
1197 return 0;
1198}
1199
/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  Compared against null_ptid to detect whether a
   step-over is in progress (see linux_wait_1).  */
ptid_t step_over_bkpt;
1203
/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.

   As a side effect, points CURRENT_INFERIOR at the thread of the
   event LWP.  Previously-recorded pending statuses are reported
   before new waitpid events.  */

static int
linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid)
      || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
    {
      /* Any lwp (or any lwp of a given process): scan the list.  */
      event_child = (struct lwp_info *)
	find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
	fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      /* A specific lwp was requested; look it up directly.  */
      requested_child = find_lwp_pid (ptid);

      if (requested_child->status_pending_p)
	event_child = requested_child;
    }

  /* Report a previously-stashed status without calling waitpid.  */
  if (event_child != NULL)
    {
      if (debug_threads)
	fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
		 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
	{
	  if (debug_threads)
	    fprintf (stderr, "WNOHANG set, no event found\n");
	  return 0;
	}

      if (event_child == NULL)
	error ("event from unknown child");

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
	{
	  if (debug_threads)
	    fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

	  /* If the last thread is exiting, just return.  */
	  if (last_thread_of_process_p (current_inferior))
	    {
	      if (debug_threads)
		fprintf (stderr, "LWP %ld is last lwp of process\n",
			 lwpid_of (event_child));
	      return lwpid_of (event_child);
	    }

	  /* Re-point current_inferior at a surviving thread (all-stop)
	     or clear it (non-stop).  */
	  if (!non_stop)
	    {
	      current_inferior = (struct thread_info *) all_threads.head;
	      if (debug_threads)
		fprintf (stderr, "Current inferior is now %ld\n",
			 lwpid_of (get_thread_lwp (current_inferior)));
	    }
	  else
	    {
	      current_inferior = NULL;
	      if (debug_threads)
		fprintf (stderr, "Current inferior is now <NULL>\n");
	    }

	  /* If we were waiting for this particular child to do something...
	     well, it did something.  */
	  if (requested_child != NULL)
	    {
	      int lwpid = lwpid_of (event_child);

	      /* Cancel the step-over operation --- the thread that
		 started it is gone.  */
	      if (finish_step_over (event_child))
		unstop_all_lwps (event_child);
	      delete_lwp (event_child);
	      return lwpid;
	    }

	  delete_lwp (event_child);

	  /* Wait for a more interesting event.  */
	  continue;
	}

      /* Deferred PTRACE_SETOPTIONS: apply it at the first stop.  */
      if (event_child->must_set_ptrace_flags)
	{
	  ptrace (PTRACE_SETOPTIONS, lwpid_of (event_child),
		  0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
	  event_child->must_set_ptrace_flags = 0;
	}

      /* Bits above the low 16 of the status indicate a ptrace
	 extended event (e.g. a clone, given PTRACE_O_TRACECLONE
	 above); handle it and keep waiting.  */
      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
	  && *wstat >> 16 != 0)
	{
	  handle_extended_wait (event_child, *wstat);
	  continue;
	}

      /* If GDB is not interested in this signal, don't stop other
	 threads, and don't report it to GDB.  Just resume the
	 inferior right away.  We do this for threading-related
	 signals as well as any that GDB specifically requested we
	 ignore.  But never ignore SIGSTOP if we sent it ourselves,
	 and do not ignore signals when stepping - they may require
	 special handling to skip the signal handler.  */
      /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
	 thread library?  */
      if (WIFSTOPPED (*wstat)
	  && !event_child->stepping
	  && (
#if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
	      (current_process ()->private->thread_db != NULL
	       && (WSTOPSIG (*wstat) == __SIGRTMIN
		   || WSTOPSIG (*wstat) == __SIGRTMIN + 1))
	      ||
#endif
	      (pass_signals[target_signal_from_host (WSTOPSIG (*wstat))]
	       && !(WSTOPSIG (*wstat) == SIGSTOP
		    && event_child->stop_expected))))
	{
	  siginfo_t info, *info_p;

	  if (debug_threads)
	    fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
		     WSTOPSIG (*wstat), lwpid_of (event_child));

	  /* Forward the original siginfo when we can fetch it, so the
	     signal is re-delivered faithfully.  */
	  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
	    info_p = &info;
	  else
	    info_p = NULL;
	  linux_resume_one_lwp (event_child, event_child->stepping,
				WSTOPSIG (*wstat), info_p);
	  continue;
	}

      /* A SIGSTOP we ourselves requested (stop_expected): only report
	 it if the client asked this thread to stop or we're stopping
	 all threads; otherwise quietly resume.  */
      if (WIFSTOPPED (*wstat)
	  && WSTOPSIG (*wstat) == SIGSTOP
	  && event_child->stop_expected)
	{
	  int should_stop;

	  if (debug_threads)
	    fprintf (stderr, "Expected stop.\n");
	  event_child->stop_expected = 0;

	  should_stop = (current_inferior->last_resume_kind == resume_stop
			 || stopping_threads);

	  if (!should_stop)
	    {
	      linux_resume_one_lwp (event_child,
				    event_child->stepping, 0, NULL);
	      continue;
	    }
	}

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}
1395
95954743
PA
1396static int
1397linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1398{
1399 ptid_t wait_ptid;
1400
1401 if (ptid_is_pid (ptid))
1402 {
1403 /* A request to wait for a specific tgid. This is not possible
1404 with waitpid, so instead, we wait for any child, and leave
1405 children we're not interested in right now with a pending
1406 status to report later. */
1407 wait_ptid = minus_one_ptid;
1408 }
1409 else
1410 wait_ptid = ptid;
1411
1412 while (1)
1413 {
1414 int event_pid;
1415
1416 event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);
1417
1418 if (event_pid > 0
1419 && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
1420 {
1421 struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));
1422
1423 if (! WIFSTOPPED (*wstat))
1424 mark_lwp_dead (event_child, *wstat);
1425 else
1426 {
1427 event_child->status_pending_p = 1;
1428 event_child->status_pending = *wstat;
1429 }
1430 }
1431 else
1432 return event_pid;
1433 }
1434}
1435
6bf5e0ba
PA
1436
1437/* Count the LWP's that have had events. */
1438
1439static int
1440count_events_callback (struct inferior_list_entry *entry, void *data)
1441{
1442 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1443 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1444 int *count = data;
1445
1446 gdb_assert (count != NULL);
1447
1448 /* Count only resumed LWPs that have a SIGTRAP event pending that
1449 should be reported to GDB. */
8336d594
PA
1450 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1451 && thread->last_resume_kind != resume_stop
6bf5e0ba
PA
1452 && lp->status_pending_p
1453 && WIFSTOPPED (lp->status_pending)
1454 && WSTOPSIG (lp->status_pending) == SIGTRAP
1455 && !breakpoint_inserted_here (lp->stop_pc))
1456 (*count)++;
1457
1458 return 0;
1459}
1460
1461/* Select the LWP (if any) that is currently being single-stepped. */
1462
1463static int
1464select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1465{
1466 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1467 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba 1468
8336d594
PA
1469 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1470 && thread->last_resume_kind == resume_step
6bf5e0ba
PA
1471 && lp->status_pending_p)
1472 return 1;
1473 else
1474 return 0;
1475}
1476
1477/* Select the Nth LWP that has had a SIGTRAP event that should be
1478 reported to GDB. */
1479
1480static int
1481select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1482{
1483 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1484 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1485 int *selector = data;
1486
1487 gdb_assert (selector != NULL);
1488
1489 /* Select only resumed LWPs that have a SIGTRAP event pending. */
8336d594
PA
1490 if (thread->last_resume_kind != resume_stop
1491 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
6bf5e0ba
PA
1492 && lp->status_pending_p
1493 && WIFSTOPPED (lp->status_pending)
1494 && WSTOPSIG (lp->status_pending) == SIGTRAP
1495 && !breakpoint_inserted_here (lp->stop_pc))
1496 if ((*selector)-- == 0)
1497 return 1;
1498
1499 return 0;
1500}
1501
1502static int
1503cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
1504{
1505 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1506 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1507 struct lwp_info *event_lp = data;
1508
1509 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1510 if (lp == event_lp)
1511 return 0;
1512
1513 /* If a LWP other than the LWP that we're reporting an event for has
1514 hit a GDB breakpoint (as opposed to some random trap signal),
1515 then just arrange for it to hit it again later. We don't keep
1516 the SIGTRAP status and don't forward the SIGTRAP signal to the
1517 LWP. We will handle the current event, eventually we will resume
1518 all LWPs, and this one will get its breakpoint trap again.
1519
1520 If we do not do this, then we run the risk that the user will
1521 delete or disable the breakpoint, but the LWP will have already
1522 tripped on it. */
1523
8336d594
PA
1524 if (thread->last_resume_kind != resume_stop
1525 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
6bf5e0ba
PA
1526 && lp->status_pending_p
1527 && WIFSTOPPED (lp->status_pending)
1528 && WSTOPSIG (lp->status_pending) == SIGTRAP
bdabb078
PA
1529 && !lp->stepping
1530 && !lp->stopped_by_watchpoint
6bf5e0ba
PA
1531 && cancel_breakpoint (lp))
1532 /* Throw away the SIGTRAP. */
1533 lp->status_pending_p = 0;
1534
1535 return 0;
1536}
1537
1538/* Select one LWP out of those that have events pending. */
1539
1540static void
1541select_event_lwp (struct lwp_info **orig_lp)
1542{
1543 int num_events = 0;
1544 int random_selector;
1545 struct lwp_info *event_lp;
1546
1547 /* Give preference to any LWP that is being single-stepped. */
1548 event_lp
1549 = (struct lwp_info *) find_inferior (&all_lwps,
1550 select_singlestep_lwp_callback, NULL);
1551 if (event_lp != NULL)
1552 {
1553 if (debug_threads)
1554 fprintf (stderr,
1555 "SEL: Select single-step %s\n",
1556 target_pid_to_str (ptid_of (event_lp)));
1557 }
1558 else
1559 {
1560 /* No single-stepping LWP. Select one at random, out of those
1561 which have had SIGTRAP events. */
1562
1563 /* First see how many SIGTRAP events we have. */
1564 find_inferior (&all_lwps, count_events_callback, &num_events);
1565
1566 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1567 random_selector = (int)
1568 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1569
1570 if (debug_threads && num_events > 1)
1571 fprintf (stderr,
1572 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1573 num_events, random_selector);
1574
1575 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
1576 select_event_lwp_callback,
1577 &random_selector);
1578 }
1579
1580 if (event_lp != NULL)
1581 {
1582 /* Switch the event LWP. */
1583 *orig_lp = event_lp;
1584 }
1585}
1586
d50171e4
PA
1587/* Set this inferior LWP's state as "want-stopped". We won't resume
1588 this LWP until the client gives us another action for it. */
1589
1590static void
1591gdb_wants_lwp_stopped (struct inferior_list_entry *entry)
1592{
1593 struct lwp_info *lwp = (struct lwp_info *) entry;
1594 struct thread_info *thread = get_lwp_thread (lwp);
1595
1596 /* Most threads are stopped implicitly (all-stop); tag that with
1597 signal 0. The thread being explicitly reported stopped to the
1598 client, gets it's status fixed up afterwards. */
1599 thread->last_status.kind = TARGET_WAITKIND_STOPPED;
1600 thread->last_status.value.sig = TARGET_SIGNAL_0;
1601
8336d594 1602 thread->last_resume_kind = resume_stop;
d50171e4
PA
1603}
1604
/* Set all LWP's states as "want-stopped" by applying
   gdb_wants_lwp_stopped to every entry of ALL_LWPS.  Used when
   reporting a stop in all-stop mode (see linux_wait_1).  */

static void
gdb_wants_all_stopped (void)
{
  for_each_inferior (&all_lwps, gdb_wants_lwp_stopped);
}
1612
/* Wait for process, returns status.

   PTID selects what to wait for (a specific thread, a process, or
   minus_one_ptid for anything).  The stop reason is stored through
   OURSTATUS and the stopped thread's ptid is returned; null_ptid is
   returned when TARGET_WNOHANG was requested and nothing was
   pending.  Internal events (gdbserver breakpoints, step-over
   completion, tracepoint collection) are handled here without being
   reported to GDB.  */

static ptid_t
linux_wait_1 (ptid_t ptid,
	      struct target_waitstatus *ourstatus, int target_options)
{
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

retry:
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive.  If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads.  We could
     pick a thread at random or restart all; restarting all is less
     arbitrary.  */
  if (!non_stop
      && !ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      struct thread_info *thread;

      thread = (struct thread_info *) find_inferior_id (&all_threads,
							cont_thread);

      /* No stepping, no signal - unless one is pending already, of course.  */
      if (thread == NULL)
	{
	  struct thread_resume resume_info;
	  resume_info.thread = minus_one_ptid;
	  resume_info.kind = resume_continue;
	  resume_info.sig = 0;
	  linux_resume (&resume_info, 1);
	}
      else
	ptid = cont_thread;
    }

  /* While a step-over is in progress, wait only on the stepping
     thread, and always block (step-overs must complete).  */
  if (ptid_equal (step_over_bkpt, null_ptid))
    pid = linux_wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
	fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
		 target_pid_to_str (step_over_bkpt));
      pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0) /* only if TARGET_WNOHANG */
    return null_ptid;

  event_child = get_thread_lwp (current_inferior);

  /* If we are waiting for a particular child, and it exited,
     linux_wait_for_event will return its exit status.  Similarly if
     the last child exited.  If this is not the last child, however,
     do not report it as exited until there is a 'thread exited' response
     available in the remote protocol.  Instead, just wait for another event.
     This should be safe, because if the thread crashed we will already
     have reported the termination signal to GDB; that should stop any
     in-progress stepping operations, etc.

     Report the exit status of the last thread to exit.  This matches
     LinuxThreads' behavior.  */

  if (last_thread_of_process_p (current_inferior))
    {
      if (WIFEXITED (w) || WIFSIGNALED (w))
	{
	  delete_lwp (event_child);

	  current_inferior = NULL;

	  if (WIFEXITED (w))
	    {
	      ourstatus->kind = TARGET_WAITKIND_EXITED;
	      ourstatus->value.integer = WEXITSTATUS (w);

	      if (debug_threads)
		fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
	    }
	  else
	    {
	      ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	      ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));

	      if (debug_threads)
		fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));

	    }

	  return pid_to_ptid (pid);
	}
    }
  else
    {
      /* A non-last thread exited; keep waiting.  */
      if (!WIFSTOPPED (w))
	goto retry;
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && (*the_low_target.breakpoint_at) (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any reinsert (software
	 single-step) breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	{
	  /* If we stepped or ran into an internal breakpoint, we've
	     already handled it.  So next time we resume (from this
	     PC), we should step over it.  */
	  if (debug_threads)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");

	  if (breakpoint_here (event_child->stop_pc))
	    event_child->need_step_over = 1;
	}
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);

      trace_event = 0;
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* Check If GDB would be interested in this event.  If GDB wanted
     this thread to single step, we always want to report the SIGTRAP,
     and let GDB handle it.  Watchpoints should always be reported.
     So should signals we can't explain.  A SIGTRAP we can't explain
     could be a GDB breakpoint --- we may or not support Z0
     breakpoints.  If we do, we're be able to handle GDB breakpoints
     on top of internal breakpoints, by handling the internal
     breakpoint and still reporting the event to GDB.  If we don't,
     we're out of luck, GDB won't see the breakpoint hit.  */
  report_to_gdb = (!maybe_internal_trap
		   || current_inferior->last_resume_kind == resume_step
		   || event_child->stopped_by_watchpoint
		   || (!step_over_finished && !bp_explains_trap && !trace_event)
		   || gdb_breakpoint_here (event_child->stop_pc));

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
	{
	  if (bp_explains_trap)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");
	  if (step_over_finished)
	    fprintf (stderr, "Step-over finished.\n");
	  if (trace_event)
	    fprintf (stderr, "Tracepoint event.\n");
	}

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (the_low_target.set_pc != NULL)
	{
	  struct regcache *regcache
	    = get_thread_regcache (get_lwp_thread (event_child), 1);
	  (*the_low_target.set_pc) (regcache, event_child->stop_pc);
	}

      /* We've finished stepping over a breakpoint.  We've stopped all
	 LWPs momentarily except the stepping one.  This is where we
	 resume them all again.  We're going to keep waiting, so use
	 proceed, which handles stepping over the next breakpoint.  */
      if (debug_threads)
	fprintf (stderr, "proceeding all threads.\n");
      proceed_all_lwps ();
      goto retry;
    }

  if (debug_threads)
    {
      if (current_inferior->last_resume_kind == resume_step)
	fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
      if (event_child->stopped_by_watchpoint)
	fprintf (stderr, "Stopped by watchpoint.\n");
      if (gdb_breakpoint_here (event_child->stop_pc))
	fprintf (stderr, "Stopped by GDB breakpoint.\n");
      if (debug_threads)
	fprintf (stderr, "Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  if (!non_stop)
    {
      /* In all-stop, stop all threads.  */
      stop_all_lwps ();

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid_equal (ptid, minus_one_ptid))
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  /* Pick up the possibly-different event LWP's status.  */
	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}

      /* Now that we've selected our final event LWP, cancel any
	 breakpoints in other LWPs that have hit a GDB breakpoint.
	 See the comment in cancel_breakpoints_callback to find out
	 why.  */
      find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (event_child);
    }

  ourstatus->kind = TARGET_WAITKIND_STOPPED;

  /* Do this before the gdb_wants_all_stopped calls below, since they
     always set last_resume_kind to resume_stop.  */
  if (current_inferior->last_resume_kind == resume_stop
      && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = TARGET_SIGNAL_0;
    }
  else if (current_inferior->last_resume_kind == resume_stop
	   && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons.  */
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }
  else
    {
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }

  gdb_assert (ptid_equal (step_over_bkpt, null_ptid));

  if (!non_stop)
    {
      /* From GDB's perspective, all-stop mode always stops all
	 threads implicitly.  Tag all threads as "want-stopped".  */
      gdb_wants_all_stopped ();
    }
  else
    {
      /* We're reporting this LWP as stopped.  Update its
	 "want-stopped" state to what the client wants, until it gets
	 a new resume action.  */
      gdb_wants_lwp_stopped (&event_child->head);
    }

  if (debug_threads)
    fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
	     target_pid_to_str (ptid_of (event_child)),
	     ourstatus->kind,
	     ourstatus->value.sig);

  get_lwp_thread (event_child)->last_status = *ourstatus;
  return ptid_of (event_child);
}
1938
1939/* Get rid of any pending event in the pipe. */
1940static void
1941async_file_flush (void)
1942{
1943 int ret;
1944 char buf;
1945
1946 do
1947 ret = read (linux_event_pipe[0], &buf, 1);
1948 while (ret >= 0 || (ret == -1 && errno == EINTR));
1949}
1950
1951/* Put something in the pipe, so the event loop wakes up. */
1952static void
1953async_file_mark (void)
1954{
1955 int ret;
1956
1957 async_file_flush ();
1958
1959 do
1960 ret = write (linux_event_pipe[1], "+", 1);
1961 while (ret == 0 || (ret == -1 && errno == EINTR));
1962
1963 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1964 be awakened anyway. */
1965}
1966
95954743
PA
1967static ptid_t
1968linux_wait (ptid_t ptid,
1969 struct target_waitstatus *ourstatus, int target_options)
bd99dc85 1970{
95954743 1971 ptid_t event_ptid;
bd99dc85
PA
1972
1973 if (debug_threads)
95954743 1974 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
bd99dc85
PA
1975
1976 /* Flush the async file first. */
1977 if (target_is_async_p ())
1978 async_file_flush ();
1979
95954743 1980 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
bd99dc85
PA
1981
1982 /* If at least one stop was reported, there may be more. A single
1983 SIGCHLD can signal more than one child stop. */
1984 if (target_is_async_p ()
1985 && (target_options & TARGET_WNOHANG) != 0
95954743 1986 && !ptid_equal (event_ptid, null_ptid))
bd99dc85
PA
1987 async_file_mark ();
1988
1989 return event_ptid;
da6d8c04
DJ
1990}
1991
/* Send a signal to an LWP.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
  /* Prefer tkill, in case we are using nptl threads, where signals
     must be addressed to a specific LWP.  If the kernel reports that
     tkill does not exist (ENOSYS), remember that and fall back to
     process-wide kill from then on.  */

#ifdef __NR_tkill
  {
    static int tkill_unsupported;

    if (!tkill_unsupported)
      {
	int rc;

	errno = 0;
	rc = syscall (__NR_tkill, lwpid, signo);
	if (errno == ENOSYS)
	  tkill_unsupported = 1;	/* Fall through to kill below.  */
	else
	  return rc;
      }
  }
#endif

  return kill (lwpid, signo);
}
2019
0d62e5e8
DJ
2020static void
2021send_sigstop (struct inferior_list_entry *entry)
2022{
54a0b537 2023 struct lwp_info *lwp = (struct lwp_info *) entry;
bd99dc85 2024 int pid;
0d62e5e8 2025
54a0b537 2026 if (lwp->stopped)
0d62e5e8
DJ
2027 return;
2028
bd99dc85
PA
2029 pid = lwpid_of (lwp);
2030
0d62e5e8
DJ
2031 /* If we already have a pending stop signal for this process, don't
2032 send another. */
54a0b537 2033 if (lwp->stop_expected)
0d62e5e8 2034 {
ae13219e 2035 if (debug_threads)
bd99dc85 2036 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
ae13219e 2037
0d62e5e8
DJ
2038 return;
2039 }
2040
2041 if (debug_threads)
bd99dc85 2042 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
0d62e5e8 2043
d50171e4 2044 lwp->stop_expected = 1;
bd99dc85 2045 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
2046}
2047
95954743
PA
2048static void
2049mark_lwp_dead (struct lwp_info *lwp, int wstat)
2050{
2051 /* It's dead, really. */
2052 lwp->dead = 1;
2053
2054 /* Store the exit status for later. */
2055 lwp->status_pending_p = 1;
2056 lwp->status_pending = wstat;
2057
95954743
PA
2058 /* Prevent trying to stop it. */
2059 lwp->stopped = 1;
2060
2061 /* No further stops are expected from a dead lwp. */
2062 lwp->stop_expected = 0;
2063}
2064
0d62e5e8
DJ
2065static void
2066wait_for_sigstop (struct inferior_list_entry *entry)
2067{
54a0b537 2068 struct lwp_info *lwp = (struct lwp_info *) entry;
bd99dc85 2069 struct thread_info *saved_inferior;
a1928bad 2070 int wstat;
95954743
PA
2071 ptid_t saved_tid;
2072 ptid_t ptid;
d50171e4 2073 int pid;
0d62e5e8 2074
54a0b537 2075 if (lwp->stopped)
d50171e4
PA
2076 {
2077 if (debug_threads)
2078 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2079 lwpid_of (lwp));
2080 return;
2081 }
0d62e5e8
DJ
2082
2083 saved_inferior = current_inferior;
bd99dc85
PA
2084 if (saved_inferior != NULL)
2085 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2086 else
95954743 2087 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 2088
95954743 2089 ptid = lwp->head.id;
bd99dc85 2090
d50171e4
PA
2091 if (debug_threads)
2092 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2093
2094 pid = linux_wait_for_event (ptid, &wstat, __WALL);
0d62e5e8
DJ
2095
2096 /* If we stopped with a non-SIGSTOP signal, save it for later
2097 and record the pending SIGSTOP. If the process exited, just
2098 return. */
d50171e4 2099 if (WIFSTOPPED (wstat))
0d62e5e8
DJ
2100 {
2101 if (debug_threads)
d50171e4
PA
2102 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2103 lwpid_of (lwp), WSTOPSIG (wstat));
c35fafde 2104
d50171e4 2105 if (WSTOPSIG (wstat) != SIGSTOP)
c35fafde
PA
2106 {
2107 if (debug_threads)
d50171e4
PA
2108 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2109 lwpid_of (lwp), wstat);
2110
c35fafde
PA
2111 lwp->status_pending_p = 1;
2112 lwp->status_pending = wstat;
2113 }
0d62e5e8 2114 }
d50171e4 2115 else
95954743
PA
2116 {
2117 if (debug_threads)
d50171e4 2118 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
95954743 2119
d50171e4
PA
2120 lwp = find_lwp_pid (pid_to_ptid (pid));
2121 if (lwp)
2122 {
2123 /* Leave this status pending for the next time we're able to
2124 report it. In the mean time, we'll report this lwp as
2125 dead to GDB, so GDB doesn't try to read registers and
2126 memory from it. This can only happen if this was the
2127 last thread of the process; otherwise, PID is removed
2128 from the thread tables before linux_wait_for_event
2129 returns. */
2130 mark_lwp_dead (lwp, wstat);
2131 }
95954743 2132 }
0d62e5e8 2133
bd99dc85 2134 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
0d62e5e8
DJ
2135 current_inferior = saved_inferior;
2136 else
2137 {
2138 if (debug_threads)
2139 fprintf (stderr, "Previously current thread died.\n");
2140
bd99dc85
PA
2141 if (non_stop)
2142 {
2143 /* We can't change the current inferior behind GDB's back,
2144 otherwise, a subsequent command may apply to the wrong
2145 process. */
2146 current_inferior = NULL;
2147 }
2148 else
2149 {
2150 /* Set a valid thread as current. */
2151 set_desired_inferior (0);
2152 }
0d62e5e8
DJ
2153 }
2154}
2155
2156static void
54a0b537 2157stop_all_lwps (void)
0d62e5e8
DJ
2158{
2159 stopping_threads = 1;
54a0b537
PA
2160 for_each_inferior (&all_lwps, send_sigstop);
2161 for_each_inferior (&all_lwps, wait_for_sigstop);
0d62e5e8
DJ
2162 stopping_threads = 0;
2163}
2164
da6d8c04
DJ
2165/* Resume execution of the inferior process.
2166 If STEP is nonzero, single-step it.
2167 If SIGNAL is nonzero, give it that signal. */
2168
ce3a066d 2169static void
2acc282a 2170linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 2171 int step, int signal, siginfo_t *info)
da6d8c04 2172{
0d62e5e8
DJ
2173 struct thread_info *saved_inferior;
2174
54a0b537 2175 if (lwp->stopped == 0)
0d62e5e8
DJ
2176 return;
2177
219f2f23
PA
2178 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2179 user used the "jump" command, or "set $pc = foo"). */
2180 if (lwp->stop_pc != get_pc (lwp))
2181 {
2182 /* Collecting 'while-stepping' actions doesn't make sense
2183 anymore. */
2184 release_while_stepping_state_list (get_lwp_thread (lwp));
2185 }
2186
0d62e5e8
DJ
2187 /* If we have pending signals or status, and a new signal, enqueue the
2188 signal. Also enqueue the signal if we are waiting to reinsert a
2189 breakpoint; it will be picked up again below. */
2190 if (signal != 0
54a0b537
PA
2191 && (lwp->status_pending_p || lwp->pending_signals != NULL
2192 || lwp->bp_reinsert != 0))
0d62e5e8
DJ
2193 {
2194 struct pending_signals *p_sig;
bca929d3 2195 p_sig = xmalloc (sizeof (*p_sig));
54a0b537 2196 p_sig->prev = lwp->pending_signals;
0d62e5e8 2197 p_sig->signal = signal;
32ca6d61
DJ
2198 if (info == NULL)
2199 memset (&p_sig->info, 0, sizeof (siginfo_t));
2200 else
2201 memcpy (&p_sig->info, info, sizeof (siginfo_t));
54a0b537 2202 lwp->pending_signals = p_sig;
0d62e5e8
DJ
2203 }
2204
d50171e4
PA
2205 if (lwp->status_pending_p)
2206 {
2207 if (debug_threads)
2208 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2209 " has pending status\n",
2210 lwpid_of (lwp), step ? "step" : "continue", signal,
2211 lwp->stop_expected ? "expected" : "not expected");
2212 return;
2213 }
0d62e5e8
DJ
2214
2215 saved_inferior = current_inferior;
54a0b537 2216 current_inferior = get_lwp_thread (lwp);
0d62e5e8
DJ
2217
2218 if (debug_threads)
1b3f6016 2219 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
bd99dc85 2220 lwpid_of (lwp), step ? "step" : "continue", signal,
54a0b537 2221 lwp->stop_expected ? "expected" : "not expected");
0d62e5e8
DJ
2222
2223 /* This bit needs some thinking about. If we get a signal that
2224 we must report while a single-step reinsert is still pending,
2225 we often end up resuming the thread. It might be better to
2226 (ew) allow a stack of pending events; then we could be sure that
2227 the reinsert happened right away and not lose any signals.
2228
2229 Making this stack would also shrink the window in which breakpoints are
54a0b537 2230 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
2231 complete correctness, so it won't solve that problem. It may be
2232 worthwhile just to solve this one, however. */
54a0b537 2233 if (lwp->bp_reinsert != 0)
0d62e5e8
DJ
2234 {
2235 if (debug_threads)
d50171e4
PA
2236 fprintf (stderr, " pending reinsert at 0x%s\n",
2237 paddress (lwp->bp_reinsert));
2238
2239 if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
2240 {
2241 if (step == 0)
2242 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2243
2244 step = 1;
2245 }
0d62e5e8
DJ
2246
2247 /* Postpone any pending signal. It was enqueued above. */
2248 signal = 0;
2249 }
2250
219f2f23
PA
2251 /* If we have while-stepping actions in this thread set it stepping.
2252 If we have a signal to deliver, it may or may not be set to
2253 SIG_IGN, we don't know. Assume so, and allow collecting
2254 while-stepping into a signal handler. A possible smart thing to
2255 do would be to set an internal breakpoint at the signal return
2256 address, continue, and carry on catching this while-stepping
2257 action only when that breakpoint is hit. A future
2258 enhancement. */
2259 if (get_lwp_thread (lwp)->while_stepping != NULL
2260 && can_hardware_single_step ())
2261 {
2262 if (debug_threads)
2263 fprintf (stderr,
2264 "lwp %ld has a while-stepping action -> forcing step.\n",
2265 lwpid_of (lwp));
2266 step = 1;
2267 }
2268
aa691b87 2269 if (debug_threads && the_low_target.get_pc != NULL)
0d62e5e8 2270 {
442ea881
PA
2271 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
2272 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
47c0c975 2273 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
0d62e5e8
DJ
2274 }
2275
2276 /* If we have pending signals, consume one unless we are trying to reinsert
2277 a breakpoint. */
54a0b537 2278 if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
0d62e5e8
DJ
2279 {
2280 struct pending_signals **p_sig;
2281
54a0b537 2282 p_sig = &lwp->pending_signals;
0d62e5e8
DJ
2283 while ((*p_sig)->prev != NULL)
2284 p_sig = &(*p_sig)->prev;
2285
2286 signal = (*p_sig)->signal;
32ca6d61 2287 if ((*p_sig)->info.si_signo != 0)
bd99dc85 2288 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
32ca6d61 2289
0d62e5e8
DJ
2290 free (*p_sig);
2291 *p_sig = NULL;
2292 }
2293
aa5ca48f
DE
2294 if (the_low_target.prepare_to_resume != NULL)
2295 the_low_target.prepare_to_resume (lwp);
2296
0d62e5e8 2297 regcache_invalidate_one ((struct inferior_list_entry *)
54a0b537 2298 get_lwp_thread (lwp));
da6d8c04 2299 errno = 0;
54a0b537 2300 lwp->stopped = 0;
c3adc08c 2301 lwp->stopped_by_watchpoint = 0;
54a0b537 2302 lwp->stepping = step;
14ce3065
DE
2303 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
2304 /* Coerce to a uintptr_t first to avoid potential gcc warning
2305 of coercing an 8 byte integer to a 4 byte pointer. */
2306 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
0d62e5e8
DJ
2307
2308 current_inferior = saved_inferior;
da6d8c04 2309 if (errno)
3221518c
UW
2310 {
2311 /* ESRCH from ptrace either means that the thread was already
2312 running (an error) or that it is gone (a race condition). If
2313 it's gone, we will get a notification the next time we wait,
2314 so we can ignore the error. We could differentiate these
2315 two, but it's tricky without waiting; the thread still exists
2316 as a zombie, so sending it signal 0 would succeed. So just
2317 ignore ESRCH. */
2318 if (errno == ESRCH)
2319 return;
2320
2321 perror_with_name ("ptrace");
2322 }
da6d8c04
DJ
2323}
2324
2bd7c093
PA
2325struct thread_resume_array
2326{
2327 struct thread_resume *resume;
2328 size_t n;
2329};
64386c31
DJ
2330
2331/* This function is called once per thread. We look up the thread
5544ad89
DJ
2332 in RESUME_PTR, and mark the thread with a pointer to the appropriate
2333 resume request.
2334
2335 This algorithm is O(threads * resume elements), but resume elements
2336 is small (and will remain small at least until GDB supports thread
2337 suspension). */
2bd7c093
PA
2338static int
2339linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
0d62e5e8 2340{
54a0b537 2341 struct lwp_info *lwp;
64386c31 2342 struct thread_info *thread;
5544ad89 2343 int ndx;
2bd7c093 2344 struct thread_resume_array *r;
64386c31
DJ
2345
2346 thread = (struct thread_info *) entry;
54a0b537 2347 lwp = get_thread_lwp (thread);
2bd7c093 2348 r = arg;
64386c31 2349
2bd7c093 2350 for (ndx = 0; ndx < r->n; ndx++)
95954743
PA
2351 {
2352 ptid_t ptid = r->resume[ndx].thread;
2353 if (ptid_equal (ptid, minus_one_ptid)
2354 || ptid_equal (ptid, entry->id)
2355 || (ptid_is_pid (ptid)
2356 && (ptid_get_pid (ptid) == pid_of (lwp)))
2357 || (ptid_get_lwp (ptid) == -1
2358 && (ptid_get_pid (ptid) == pid_of (lwp))))
2359 {
d50171e4 2360 if (r->resume[ndx].kind == resume_stop
8336d594 2361 && thread->last_resume_kind == resume_stop)
d50171e4
PA
2362 {
2363 if (debug_threads)
2364 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
2365 thread->last_status.kind == TARGET_WAITKIND_STOPPED
2366 ? "stopped"
2367 : "stopping",
2368 lwpid_of (lwp));
2369
2370 continue;
2371 }
2372
95954743 2373 lwp->resume = &r->resume[ndx];
8336d594 2374 thread->last_resume_kind = lwp->resume->kind;
95954743
PA
2375 return 0;
2376 }
2377 }
2bd7c093
PA
2378
2379 /* No resume action for this thread. */
2380 lwp->resume = NULL;
64386c31 2381
2bd7c093 2382 return 0;
5544ad89
DJ
2383}
2384
5544ad89 2385
bd99dc85
PA
2386/* Set *FLAG_P if this lwp has an interesting status pending. */
2387static int
2388resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 2389{
bd99dc85 2390 struct lwp_info *lwp = (struct lwp_info *) entry;
5544ad89 2391
bd99dc85
PA
2392 /* LWPs which will not be resumed are not interesting, because
2393 we might not wait for them next time through linux_wait. */
2bd7c093 2394 if (lwp->resume == NULL)
bd99dc85 2395 return 0;
64386c31 2396
bd99dc85 2397 if (lwp->status_pending_p)
d50171e4
PA
2398 * (int *) flag_p = 1;
2399
2400 return 0;
2401}
2402
2403/* Return 1 if this lwp that GDB wants running is stopped at an
2404 internal breakpoint that we need to step over. It assumes that any
2405 required STOP_PC adjustment has already been propagated to the
2406 inferior's regcache. */
2407
2408static int
2409need_step_over_p (struct inferior_list_entry *entry, void *dummy)
2410{
2411 struct lwp_info *lwp = (struct lwp_info *) entry;
8336d594 2412 struct thread_info *thread;
d50171e4
PA
2413 struct thread_info *saved_inferior;
2414 CORE_ADDR pc;
2415
2416 /* LWPs which will not be resumed are not interesting, because we
2417 might not wait for them next time through linux_wait. */
2418
2419 if (!lwp->stopped)
2420 {
2421 if (debug_threads)
2422 fprintf (stderr,
2423 "Need step over [LWP %ld]? Ignoring, not stopped\n",
2424 lwpid_of (lwp));
2425 return 0;
2426 }
2427
8336d594
PA
2428 thread = get_lwp_thread (lwp);
2429
2430 if (thread->last_resume_kind == resume_stop)
d50171e4
PA
2431 {
2432 if (debug_threads)
2433 fprintf (stderr,
2434 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
2435 lwpid_of (lwp));
2436 return 0;
2437 }
2438
2439 if (!lwp->need_step_over)
2440 {
2441 if (debug_threads)
2442 fprintf (stderr,
2443 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
2444 }
5544ad89 2445
bd99dc85 2446 if (lwp->status_pending_p)
d50171e4
PA
2447 {
2448 if (debug_threads)
2449 fprintf (stderr,
2450 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
2451 lwpid_of (lwp));
2452 return 0;
2453 }
2454
2455 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
2456 or we have. */
2457 pc = get_pc (lwp);
2458
2459 /* If the PC has changed since we stopped, then don't do anything,
2460 and let the breakpoint/tracepoint be hit. This happens if, for
2461 instance, GDB handled the decr_pc_after_break subtraction itself,
2462 GDB is OOL stepping this thread, or the user has issued a "jump"
2463 command, or poked thread's registers herself. */
2464 if (pc != lwp->stop_pc)
2465 {
2466 if (debug_threads)
2467 fprintf (stderr,
2468 "Need step over [LWP %ld]? Cancelling, PC was changed. "
2469 "Old stop_pc was 0x%s, PC is now 0x%s\n",
2470 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
2471
2472 lwp->need_step_over = 0;
2473 return 0;
2474 }
2475
2476 saved_inferior = current_inferior;
8336d594 2477 current_inferior = thread;
d50171e4 2478
8b07ae33 2479 /* We can only step over breakpoints we know about. */
d50171e4
PA
2480 if (breakpoint_here (pc))
2481 {
8b07ae33
PA
2482 /* Don't step over a breakpoint that GDB expects to hit
2483 though. */
2484 if (gdb_breakpoint_here (pc))
2485 {
2486 if (debug_threads)
2487 fprintf (stderr,
2488 "Need step over [LWP %ld]? yes, but found"
2489 " GDB breakpoint at 0x%s; skipping step over\n",
2490 lwpid_of (lwp), paddress (pc));
d50171e4 2491
8b07ae33
PA
2492 current_inferior = saved_inferior;
2493 return 0;
2494 }
2495 else
2496 {
2497 if (debug_threads)
2498 fprintf (stderr,
2499 "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n",
2500 lwpid_of (lwp), paddress (pc));
d50171e4 2501
8b07ae33
PA
2502 /* We've found an lwp that needs stepping over --- return 1 so
2503 that find_inferior stops looking. */
2504 current_inferior = saved_inferior;
2505
2506 /* If the step over is cancelled, this is set again. */
2507 lwp->need_step_over = 0;
2508 return 1;
2509 }
d50171e4
PA
2510 }
2511
2512 current_inferior = saved_inferior;
2513
2514 if (debug_threads)
2515 fprintf (stderr,
2516 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
2517 lwpid_of (lwp), paddress (pc));
c6ecbae5 2518
bd99dc85 2519 return 0;
5544ad89
DJ
2520}
2521
d50171e4
PA
2522/* Start a step-over operation on LWP. When LWP stopped at a
2523 breakpoint, to make progress, we need to remove the breakpoint out
2524 of the way. If we let other threads run while we do that, they may
2525 pass by the breakpoint location and miss hitting it. To avoid
2526 that, a step-over momentarily stops all threads while LWP is
2527 single-stepped while the breakpoint is temporarily uninserted from
2528 the inferior. When the single-step finishes, we reinsert the
2529 breakpoint, and let all threads that are supposed to be running,
2530 run again.
2531
2532 On targets that don't support hardware single-step, we don't
2533 currently support full software single-stepping. Instead, we only
2534 support stepping over the thread event breakpoint, by asking the
2535 low target where to place a reinsert breakpoint. Since this
2536 routine assumes the breakpoint being stepped over is a thread event
2537 breakpoint, it usually assumes the return address of the current
2538 function is a good enough place to set the reinsert breakpoint. */
2539
2540static int
2541start_step_over (struct lwp_info *lwp)
2542{
2543 struct thread_info *saved_inferior;
2544 CORE_ADDR pc;
2545 int step;
2546
2547 if (debug_threads)
2548 fprintf (stderr,
2549 "Starting step-over on LWP %ld. Stopping all threads\n",
2550 lwpid_of (lwp));
2551
2552 stop_all_lwps ();
2553
2554 if (debug_threads)
2555 fprintf (stderr, "Done stopping all threads for step-over.\n");
2556
2557 /* Note, we should always reach here with an already adjusted PC,
2558 either by GDB (if we're resuming due to GDB's request), or by our
2559 caller, if we just finished handling an internal breakpoint GDB
2560 shouldn't care about. */
2561 pc = get_pc (lwp);
2562
2563 saved_inferior = current_inferior;
2564 current_inferior = get_lwp_thread (lwp);
2565
2566 lwp->bp_reinsert = pc;
2567 uninsert_breakpoints_at (pc);
2568
2569 if (can_hardware_single_step ())
2570 {
2571 step = 1;
2572 }
2573 else
2574 {
2575 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
2576 set_reinsert_breakpoint (raddr);
2577 step = 0;
2578 }
2579
2580 current_inferior = saved_inferior;
2581
2582 linux_resume_one_lwp (lwp, step, 0, NULL);
2583
2584 /* Require next event from this LWP. */
2585 step_over_bkpt = lwp->head.id;
2586 return 1;
2587}
2588
2589/* Finish a step-over. Reinsert the breakpoint we had uninserted in
2590 start_step_over, if still there, and delete any reinsert
2591 breakpoints we've set, on non hardware single-step targets. */
2592
2593static int
2594finish_step_over (struct lwp_info *lwp)
2595{
2596 if (lwp->bp_reinsert != 0)
2597 {
2598 if (debug_threads)
2599 fprintf (stderr, "Finished step over.\n");
2600
2601 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
2602 may be no breakpoint to reinsert there by now. */
2603 reinsert_breakpoints_at (lwp->bp_reinsert);
2604
2605 lwp->bp_reinsert = 0;
2606
2607 /* Delete any software-single-step reinsert breakpoints. No
2608 longer needed. We don't have to worry about other threads
2609 hitting this trap, and later not being able to explain it,
2610 because we were stepping over a breakpoint, and we hold all
2611 threads but LWP stopped while doing that. */
2612 if (!can_hardware_single_step ())
2613 delete_reinsert_breakpoints ();
2614
2615 step_over_bkpt = null_ptid;
2616 return 1;
2617 }
2618 else
2619 return 0;
2620}
2621
5544ad89
DJ
2622/* This function is called once per thread. We check the thread's resume
2623 request, which will tell us whether to resume, step, or leave the thread
bd99dc85 2624 stopped; and what signal, if any, it should be sent.
5544ad89 2625
bd99dc85
PA
2626 For threads which we aren't explicitly told otherwise, we preserve
2627 the stepping flag; this is used for stepping over gdbserver-placed
2628 breakpoints.
2629
2630 If pending_flags was set in any thread, we queue any needed
2631 signals, since we won't actually resume. We already have a pending
2632 event to report, so we don't need to preserve any step requests;
2633 they should be re-issued if necessary. */
2634
2635static int
2636linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
5544ad89 2637{
54a0b537 2638 struct lwp_info *lwp;
5544ad89 2639 struct thread_info *thread;
bd99dc85 2640 int step;
d50171e4
PA
2641 int leave_all_stopped = * (int *) arg;
2642 int leave_pending;
5544ad89
DJ
2643
2644 thread = (struct thread_info *) entry;
54a0b537 2645 lwp = get_thread_lwp (thread);
5544ad89 2646
2bd7c093 2647 if (lwp->resume == NULL)
bd99dc85 2648 return 0;
5544ad89 2649
bd99dc85 2650 if (lwp->resume->kind == resume_stop)
5544ad89 2651 {
bd99dc85 2652 if (debug_threads)
d50171e4 2653 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
bd99dc85
PA
2654
2655 if (!lwp->stopped)
2656 {
2657 if (debug_threads)
d50171e4 2658 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
bd99dc85 2659
d50171e4
PA
2660 /* Stop the thread, and wait for the event asynchronously,
2661 through the event loop. */
bd99dc85
PA
2662 send_sigstop (&lwp->head);
2663 }
2664 else
2665 {
2666 if (debug_threads)
d50171e4
PA
2667 fprintf (stderr, "already stopped LWP %ld\n",
2668 lwpid_of (lwp));
2669
2670 /* The LWP may have been stopped in an internal event that
2671 was not meant to be notified back to GDB (e.g., gdbserver
2672 breakpoint), so we should be reporting a stop event in
2673 this case too. */
2674
2675 /* If the thread already has a pending SIGSTOP, this is a
2676 no-op. Otherwise, something later will presumably resume
2677 the thread and this will cause it to cancel any pending
2678 operation, due to last_resume_kind == resume_stop. If
2679 the thread already has a pending status to report, we
2680 will still report it the next time we wait - see
2681 status_pending_p_callback. */
2682 send_sigstop (&lwp->head);
bd99dc85 2683 }
32ca6d61 2684
bd99dc85
PA
2685 /* For stop requests, we're done. */
2686 lwp->resume = NULL;
fc7238bb 2687 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
bd99dc85 2688 return 0;
5544ad89
DJ
2689 }
2690
bd99dc85
PA
2691 /* If this thread which is about to be resumed has a pending status,
2692 then don't resume any threads - we can just report the pending
2693 status. Make sure to queue any signals that would otherwise be
2694 sent. In all-stop mode, we do this decision based on if *any*
d50171e4
PA
2695 thread has a pending status. If there's a thread that needs the
2696 step-over-breakpoint dance, then don't resume any other thread
2697 but that particular one. */
2698 leave_pending = (lwp->status_pending_p || leave_all_stopped);
5544ad89 2699
d50171e4 2700 if (!leave_pending)
bd99dc85
PA
2701 {
2702 if (debug_threads)
2703 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
5544ad89 2704
d50171e4 2705 step = (lwp->resume->kind == resume_step);
2acc282a 2706 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
bd99dc85
PA
2707 }
2708 else
2709 {
2710 if (debug_threads)
2711 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
5544ad89 2712
bd99dc85
PA
2713 /* If we have a new signal, enqueue the signal. */
2714 if (lwp->resume->sig != 0)
2715 {
2716 struct pending_signals *p_sig;
2717 p_sig = xmalloc (sizeof (*p_sig));
2718 p_sig->prev = lwp->pending_signals;
2719 p_sig->signal = lwp->resume->sig;
2720 memset (&p_sig->info, 0, sizeof (siginfo_t));
2721
2722 /* If this is the same signal we were previously stopped by,
2723 make sure to queue its siginfo. We can ignore the return
2724 value of ptrace; if it fails, we'll skip
2725 PTRACE_SETSIGINFO. */
2726 if (WIFSTOPPED (lwp->last_status)
2727 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
2728 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
2729
2730 lwp->pending_signals = p_sig;
2731 }
2732 }
5544ad89 2733
fc7238bb 2734 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
bd99dc85 2735 lwp->resume = NULL;
5544ad89 2736 return 0;
0d62e5e8
DJ
2737}
2738
2739static void
2bd7c093 2740linux_resume (struct thread_resume *resume_info, size_t n)
0d62e5e8 2741{
2bd7c093 2742 struct thread_resume_array array = { resume_info, n };
d50171e4
PA
2743 struct lwp_info *need_step_over = NULL;
2744 int any_pending;
2745 int leave_all_stopped;
c6ecbae5 2746
2bd7c093 2747 find_inferior (&all_threads, linux_set_resume_request, &array);
5544ad89 2748
d50171e4
PA
2749 /* If there is a thread which would otherwise be resumed, which has
2750 a pending status, then don't resume any threads - we can just
2751 report the pending status. Make sure to queue any signals that
2752 would otherwise be sent. In non-stop mode, we'll apply this
2753 logic to each thread individually. We consume all pending events
2754 before considering to start a step-over (in all-stop). */
2755 any_pending = 0;
bd99dc85 2756 if (!non_stop)
d50171e4
PA
2757 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
2758
2759 /* If there is a thread which would otherwise be resumed, which is
2760 stopped at a breakpoint that needs stepping over, then don't
2761 resume any threads - have it step over the breakpoint with all
2762 other threads stopped, then resume all threads again. Make sure
2763 to queue any signals that would otherwise be delivered or
2764 queued. */
2765 if (!any_pending && supports_breakpoints ())
2766 need_step_over
2767 = (struct lwp_info *) find_inferior (&all_lwps,
2768 need_step_over_p, NULL);
2769
2770 leave_all_stopped = (need_step_over != NULL || any_pending);
2771
2772 if (debug_threads)
2773 {
2774 if (need_step_over != NULL)
2775 fprintf (stderr, "Not resuming all, need step over\n");
2776 else if (any_pending)
2777 fprintf (stderr,
2778 "Not resuming, all-stop and found "
2779 "an LWP with pending status\n");
2780 else
2781 fprintf (stderr, "Resuming, no pending status or step over needed\n");
2782 }
2783
2784 /* Even if we're leaving threads stopped, queue all signals we'd
2785 otherwise deliver. */
2786 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
2787
2788 if (need_step_over)
2789 start_step_over (need_step_over);
2790}
2791
2792/* This function is called once per thread. We check the thread's
2793 last resume request, which will tell us whether to resume, step, or
2794 leave the thread stopped. Any signal the client requested to be
2795 delivered has already been enqueued at this point.
2796
2797 If any thread that GDB wants running is stopped at an internal
2798 breakpoint that needs stepping over, we start a step-over operation
2799 on that particular thread, and leave all others stopped. */
2800
2801static void
2802proceed_one_lwp (struct inferior_list_entry *entry)
2803{
2804 struct lwp_info *lwp;
8336d594 2805 struct thread_info *thread;
d50171e4
PA
2806 int step;
2807
2808 lwp = (struct lwp_info *) entry;
2809
2810 if (debug_threads)
2811 fprintf (stderr,
2812 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
2813
2814 if (!lwp->stopped)
2815 {
2816 if (debug_threads)
2817 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
2818 return;
2819 }
2820
8336d594
PA
2821 thread = get_lwp_thread (lwp);
2822
2823 if (thread->last_resume_kind == resume_stop)
d50171e4
PA
2824 {
2825 if (debug_threads)
2826 fprintf (stderr, " client wants LWP %ld stopped\n", lwpid_of (lwp));
2827 return;
2828 }
2829
2830 if (lwp->status_pending_p)
2831 {
2832 if (debug_threads)
2833 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
2834 lwpid_of (lwp));
2835 return;
2836 }
2837
2838 if (lwp->suspended)
2839 {
2840 if (debug_threads)
2841 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
2842 return;
2843 }
2844
8336d594 2845 step = thread->last_resume_kind == resume_step;
d50171e4
PA
2846 linux_resume_one_lwp (lwp, step, 0, NULL);
2847}
2848
/* When we finish a step-over, set threads running again.  If there's
   another thread that may need a step-over, now's the time to start
   it.  Eventually, we'll move all threads past their breakpoints.  */

static void
proceed_all_lwps (void)
{
  struct lwp_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (supports_breakpoints ())
    {
      need_step_over
	= (struct lwp_info *) find_inferior (&all_lwps,
					     need_step_over_p, NULL);

      if (need_step_over != NULL)
	{
	  if (debug_threads)
	    fprintf (stderr, "proceed_all_lwps: found "
		     "thread %ld needing a step-over\n",
		     lwpid_of (need_step_over));

	  /* Step-overs are serialized: only one at a time; the rest
	     of the threads stay stopped until it completes.  */
	  start_step_over (need_step_over);
	  return;
	}
    }

  if (debug_threads)
    fprintf (stderr, "Proceeding, no step-over needed\n");

  for_each_inferior (&all_lwps, proceed_one_lwp);
}
2886
/* Stopped LWPs that the client wanted to be running, that don't have
   pending statuses, are set to run again, except for EXCEPT, if not
   NULL.  This undoes a stop_all_lwps call.  */

static void
unstop_all_lwps (struct lwp_info *except)
{
  if (debug_threads)
    {
      if (except)
	fprintf (stderr,
		 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
      else
	fprintf (stderr,
		 "unstopping all lwps\n");
    }

  /* Make sure proceed_one_lwp doesn't try to resume this thread.
     Bumping the suspend count makes proceed_one_lwp's "suspended"
     guard skip it; the count is restored right after.  */
  if (except != NULL)
    ++except->suspended;

  for_each_inferior (&all_lwps, proceed_one_lwp);

  if (except != NULL)
    --except->suspended;
}
2913
2914#ifdef HAVE_LINUX_USRREGS
da6d8c04
DJ
2915
2916int
0a30fbc4 2917register_addr (int regnum)
da6d8c04
DJ
2918{
2919 int addr;
2920
2ec06d2e 2921 if (regnum < 0 || regnum >= the_low_target.num_regs)
da6d8c04
DJ
2922 error ("Invalid register number %d.", regnum);
2923
2ec06d2e 2924 addr = the_low_target.regmap[regnum];
da6d8c04
DJ
2925
2926 return addr;
2927}
2928
/* Fetch one register.  Reads register REGNO from the inferior's
   ptrace user area, one PTRACE_XFER_TYPE word at a time, and supplies
   it to REGCACHE.  Silently does nothing for registers the target
   says cannot be fetched this way.  */
static void
fetch_register (struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= the_low_target.num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (regno);
  if (regaddr == -1)
    return;

  pid = lwpid_of (get_thread_lwp (current_inferior));
  /* Round the register size up to a whole number of ptrace words.  */
  size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
	  & - sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      /* PTRACE_PEEKUSER returns the data in-band; errno is the only
	 way to distinguish a -1 value from a failure.  */
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  /* Let the target post-process the raw bytes if it needs to
     (e.g. for registers laid out differently in the user area).  */
  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
2969
2970/* Fetch all registers, or just one, from the child process. */
58caa3dc 2971static void
442ea881 2972usr_fetch_inferior_registers (struct regcache *regcache, int regno)
da6d8c04 2973{
4463ce24 2974 if (regno == -1)
2ec06d2e 2975 for (regno = 0; regno < the_low_target.num_regs; regno++)
442ea881 2976 fetch_register (regcache, regno);
da6d8c04 2977 else
442ea881 2978 fetch_register (regcache, regno);
da6d8c04
DJ
2979}
2980
/* Store our register values back into the inferior.
   If REGNO is -1, do this for all registers.
   Otherwise, REGNO specifies which register (so we can save time).
   Writes go through PTRACE_POKEUSER one word at a time.  */
static void
usr_store_inferior_registers (struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= 0)
    {
      if (regno >= the_low_target.num_regs)
	return;

      if ((*the_low_target.cannot_store_register) (regno) == 1)
	return;

      regaddr = register_addr (regno);
      if (regaddr == -1)
	return;
      errno = 0;
      /* Round the register size up to a whole number of ptrace words
	 and zero-fill, since the last word may be only partially
	 covered by the register's bytes.  */
      size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
	     & - sizeof (PTRACE_XFER_TYPE);
      buf = alloca (size);
      memset (buf, 0, size);

      /* Let the target pre-process the bytes if it needs to.  */
      if (the_low_target.collect_ptrace_register)
	the_low_target.collect_ptrace_register (regcache, regno, buf);
      else
	collect_register (regcache, regno, buf);

      pid = lwpid_of (get_thread_lwp (current_inferior));
      for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
	{
	  errno = 0;
	  ptrace (PTRACE_POKEUSER, pid,
		  /* Coerce to a uintptr_t first to avoid potential gcc warning
		     about coercing an 8 byte integer to a 4 byte pointer.  */
		  (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
		  (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
	  if (errno != 0)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      if (errno == ESRCH)
		return;

	      /* Only complain for registers the target claims are
		 storable.  */
	      if ((*the_low_target.cannot_store_register) (regno) == 0)
		error ("writing register %d: %s", regno, strerror (errno));
	    }
	  regaddr += sizeof (PTRACE_XFER_TYPE);
	}
    }
  else
    for (regno = 0; regno < the_low_target.num_regs; regno++)
      usr_store_inferior_registers (regcache, regno);
}
58caa3dc
DJ
3042#endif /* HAVE_LINUX_USRREGS */
3043
3044
3045
3046#ifdef HAVE_LINUX_REGSETS
3047
3048static int
442ea881 3049regsets_fetch_inferior_registers (struct regcache *regcache)
58caa3dc
DJ
3050{
3051 struct regset_info *regset;
e9d25b98 3052 int saw_general_regs = 0;
95954743 3053 int pid;
1570b33e 3054 struct iovec iov;
58caa3dc
DJ
3055
3056 regset = target_regsets;
3057
95954743 3058 pid = lwpid_of (get_thread_lwp (current_inferior));
58caa3dc
DJ
3059 while (regset->size >= 0)
3060 {
1570b33e
L
3061 void *buf, *data;
3062 int nt_type, res;
58caa3dc 3063
52fa2412 3064 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
58caa3dc
DJ
3065 {
3066 regset ++;
3067 continue;
3068 }
3069
bca929d3 3070 buf = xmalloc (regset->size);
1570b33e
L
3071
3072 nt_type = regset->nt_type;
3073 if (nt_type)
3074 {
3075 iov.iov_base = buf;
3076 iov.iov_len = regset->size;
3077 data = (void *) &iov;
3078 }
3079 else
3080 data = buf;
3081
dfb64f85 3082#ifndef __sparc__
1570b33e 3083 res = ptrace (regset->get_request, pid, nt_type, data);
dfb64f85 3084#else
1570b33e 3085 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 3086#endif
58caa3dc
DJ
3087 if (res < 0)
3088 {
3089 if (errno == EIO)
3090 {
52fa2412
UW
3091 /* If we get EIO on a regset, do not try it again for
3092 this process. */
3093 disabled_regsets[regset - target_regsets] = 1;
fdeb2a12 3094 free (buf);
52fa2412 3095 continue;
58caa3dc
DJ
3096 }
3097 else
3098 {
0d62e5e8 3099 char s[256];
95954743
PA
3100 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3101 pid);
0d62e5e8 3102 perror (s);
58caa3dc
DJ
3103 }
3104 }
e9d25b98
DJ
3105 else if (regset->type == GENERAL_REGS)
3106 saw_general_regs = 1;
442ea881 3107 regset->store_function (regcache, buf);
58caa3dc 3108 regset ++;
fdeb2a12 3109 free (buf);
58caa3dc 3110 }
e9d25b98
DJ
3111 if (saw_general_regs)
3112 return 0;
3113 else
3114 return 1;
58caa3dc
DJ
3115}
3116
3117static int
442ea881 3118regsets_store_inferior_registers (struct regcache *regcache)
58caa3dc
DJ
3119{
3120 struct regset_info *regset;
e9d25b98 3121 int saw_general_regs = 0;
95954743 3122 int pid;
1570b33e 3123 struct iovec iov;
58caa3dc
DJ
3124
3125 regset = target_regsets;
3126
95954743 3127 pid = lwpid_of (get_thread_lwp (current_inferior));
58caa3dc
DJ
3128 while (regset->size >= 0)
3129 {
1570b33e
L
3130 void *buf, *data;
3131 int nt_type, res;
58caa3dc 3132
52fa2412 3133 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
58caa3dc
DJ
3134 {
3135 regset ++;
3136 continue;
3137 }
3138
bca929d3 3139 buf = xmalloc (regset->size);
545587ee
DJ
3140
3141 /* First fill the buffer with the current register set contents,
3142 in case there are any items in the kernel's regset that are
3143 not in gdbserver's regcache. */
1570b33e
L
3144
3145 nt_type = regset->nt_type;
3146 if (nt_type)
3147 {
3148 iov.iov_base = buf;
3149 iov.iov_len = regset->size;
3150 data = (void *) &iov;
3151 }
3152 else
3153 data = buf;
3154
dfb64f85 3155#ifndef __sparc__
1570b33e 3156 res = ptrace (regset->get_request, pid, nt_type, data);
dfb64f85 3157#else
1570b33e 3158 res = ptrace (regset->get_request, pid, &iov, data);
dfb64f85 3159#endif
545587ee
DJ
3160
3161 if (res == 0)
3162 {
3163 /* Then overlay our cached registers on that. */
442ea881 3164 regset->fill_function (regcache, buf);
545587ee
DJ
3165
3166 /* Only now do we write the register set. */
dfb64f85 3167#ifndef __sparc__
1570b33e 3168 res = ptrace (regset->set_request, pid, nt_type, data);
dfb64f85 3169#else
1570b33e 3170 res = ptrace (regset->set_request, pid, data, nt_type);
dfb64f85 3171#endif
545587ee
DJ
3172 }
3173
58caa3dc
DJ
3174 if (res < 0)
3175 {
3176 if (errno == EIO)
3177 {
52fa2412
UW
3178 /* If we get EIO on a regset, do not try it again for
3179 this process. */
3180 disabled_regsets[regset - target_regsets] = 1;
fdeb2a12 3181 free (buf);
52fa2412 3182 continue;
58caa3dc 3183 }
3221518c
UW
3184 else if (errno == ESRCH)
3185 {
1b3f6016
PA
3186 /* At this point, ESRCH should mean the process is
3187 already gone, in which case we simply ignore attempts
3188 to change its registers. See also the related
3189 comment in linux_resume_one_lwp. */
fdeb2a12 3190 free (buf);
3221518c
UW
3191 return 0;
3192 }
58caa3dc
DJ
3193 else
3194 {
ce3a066d 3195 perror ("Warning: ptrace(regsets_store_inferior_registers)");
58caa3dc
DJ
3196 }
3197 }
e9d25b98
DJ
3198 else if (regset->type == GENERAL_REGS)
3199 saw_general_regs = 1;
58caa3dc 3200 regset ++;
09ec9b38 3201 free (buf);
58caa3dc 3202 }
e9d25b98
DJ
3203 if (saw_general_regs)
3204 return 0;
3205 else
3206 return 1;
ce3a066d 3207 return 0;
58caa3dc
DJ
3208}
3209
3210#endif /* HAVE_LINUX_REGSETS */
3211
3212
/* Fetch registers (all of them, or just REGNO) into REGCACHE.
   Regsets are preferred; if they supplied the general registers
   (return value 0) we are done, otherwise fall back to the
   one-word-at-a-time PTRACE_PEEKUSER method.  */
void
linux_fetch_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  if (regsets_fetch_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_fetch_inferior_registers (regcache, regno);
#endif
}
3224
/* Write registers (all of them, or just REGNO) from REGCACHE back to
   the inferior.  Mirrors linux_fetch_registers: regsets first, then
   the PTRACE_POKEUSER fallback if the regsets did not cover the
   general registers.  */
void
linux_store_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  if (regsets_store_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_store_inferior_registers (regcache, regno);
#endif
}
3236
da6d8c04 3237
da6d8c04
DJ
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success,
   or the value of errno on failure.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  register int i;
  /* Round starting address down to longword boundary.  */
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  register int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
      / sizeof (PTRACE_XFER_TYPE);
  /* Allocate buffer of that many longwords.  */
  register PTRACE_XFER_TYPE *buffer
    = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
  int fd;
  char filename[64];
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      if (pread64 (fd, myaddr, len, memaddr) != len)
#else
      if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
#endif
	{
	  close (fd);
	  goto no_proc;
	}

      close (fd);
      return 0;
    }

 no_proc:
  /* Fallback: read word by word with PTRACE_PEEKTEXT.  */
  /* Read all the longwords */
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
      if (errno)
	return errno;
    }

  /* Copy appropriate bytes out of the buffer.  */
  memcpy (myaddr,
	  (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  len);

  return 0;
}
3306
93ae6fdc
PA
3307/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
3308 memory at MEMADDR. On failure (cannot write to the inferior)
da6d8c04
DJ
3309 returns the value of errno. */
3310
ce3a066d 3311static int
f450004a 3312linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
da6d8c04
DJ
3313{
3314 register int i;
3315 /* Round starting address down to longword boundary. */
3316 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3317 /* Round ending address up; get number of longwords that makes. */
3318 register int count
3319 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
3320 /* Allocate buffer of that many longwords. */
3321 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
95954743 3322 int pid = lwpid_of (get_thread_lwp (current_inferior));
da6d8c04 3323
0d62e5e8
DJ
3324 if (debug_threads)
3325 {
58d6951d
DJ
3326 /* Dump up to four bytes. */
3327 unsigned int val = * (unsigned int *) myaddr;
3328 if (len == 1)
3329 val = val & 0xff;
3330 else if (len == 2)
3331 val = val & 0xffff;
3332 else if (len == 3)
3333 val = val & 0xffffff;
3334 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
3335 val, (long)memaddr);
0d62e5e8
DJ
3336 }
3337
da6d8c04
DJ
3338 /* Fill start and end extra bytes of buffer with existing memory data. */
3339
93ae6fdc 3340 errno = 0;
14ce3065
DE
3341 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3342 about coercing an 8 byte integer to a 4 byte pointer. */
3343 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
3344 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
93ae6fdc
PA
3345 if (errno)
3346 return errno;
da6d8c04
DJ
3347
3348 if (count > 1)
3349 {
93ae6fdc 3350 errno = 0;
da6d8c04 3351 buffer[count - 1]
95954743 3352 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
3353 /* Coerce to a uintptr_t first to avoid potential gcc warning
3354 about coercing an 8 byte integer to a 4 byte pointer. */
3355 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
3356 * sizeof (PTRACE_XFER_TYPE)),
d844cde6 3357 0);
93ae6fdc
PA
3358 if (errno)
3359 return errno;
da6d8c04
DJ
3360 }
3361
93ae6fdc 3362 /* Copy data to be written over corresponding part of buffer. */
da6d8c04
DJ
3363
3364 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
3365
3366 /* Write the entire buffer. */
3367
3368 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3369 {
3370 errno = 0;
14ce3065
DE
3371 ptrace (PTRACE_POKETEXT, pid,
3372 /* Coerce to a uintptr_t first to avoid potential gcc warning
3373 about coercing an 8 byte integer to a 4 byte pointer. */
3374 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
3375 (PTRACE_ARG4_TYPE) buffer[i]);
da6d8c04
DJ
3376 if (errno)
3377 return errno;
3378 }
3379
3380 return 0;
3381}
2f2893d9 3382
6076632b 3383/* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
24a09b5f
DJ
3384static int linux_supports_tracefork_flag;
3385
51c2684e 3386/* Helper functions for linux_test_for_tracefork, called via clone (). */
24a09b5f 3387
51c2684e
DJ
/* Grandchild body for linux_test_for_tracefork: exits immediately.
   Its only purpose is to make the child generate a fork event for the
   parent to observe.  */
static int
linux_tracefork_grandchild (void *arg)
{
  _exit (0);
}
3393
7407e2de
AS
3394#define STACK_SIZE 4096
3395
51c2684e
DJ
/* Child body for linux_test_for_tracefork: request tracing, stop so
   the parent can set PTRACE_O_TRACEFORK, then fork (or clone on
   no-MMU systems) a grandchild and exit.  ARG is the stack block for
   the no-MMU clone path (unused otherwise).  */
static int
linux_tracefork_child (void *arg)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  /* Stop here; the parent resumes us after setting the option.  */
  kill (getpid (), SIGSTOP);

#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  if (fork () == 0)
    linux_tracefork_grandchild (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  /* No MMU: use clone with a caller-provided stack instead of fork.  */
#ifdef __ia64__
  __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
	    CLONE_VM | SIGCHLD, NULL);
#else
  clone (linux_tracefork_grandchild, arg + STACK_SIZE,
	 CLONE_VM | SIGCHLD, NULL);
#endif

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  _exit (0);
}
3421
24a09b5f
DJ
/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.  Make
   sure that we can enable the option, and that it had the desired
   effect.  Sets linux_supports_tracefork_flag accordingly.  */

static void
linux_test_for_tracefork (void)
{
  int child_pid, ret, status;
  long second_pid;
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  char *stack = xmalloc (STACK_SIZE * 4);
#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  linux_supports_tracefork_flag = 0;

#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  child_pid = fork ();
  if (child_pid == 0)
    linux_tracefork_child (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  /* Use CLONE_VM instead of fork, to support uClinux (no MMU).  */
#ifdef __ia64__
  child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
			CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#else /* !__ia64__ */
  child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
		     CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#endif /* !__ia64__ */

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  if (child_pid == -1)
    perror_with_name ("clone");

  /* The child SIGSTOPs itself after PTRACE_TRACEME; wait for that.  */
  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name ("waitpid");
  else if (ret != child_pid)
    error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
  if (! WIFSTOPPED (status))
    error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
		(PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      /* Option not supported; kill and reap the child before
	 returning with the flag left clear.  */
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning ("linux_test_for_tracefork: failed to kill child");
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning ("linux_test_for_tracefork: failed to wait for killed child");
      else if (!WIFSIGNALED (status))
	warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
		 "killed child", status);

      return;
    }

  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning ("linux_test_for_tracefork: failed to resume child");

  ret = my_waitpid (child_pid, &status, 0);

  /* A PTRACE_EVENT_FORK stop proves the option took effect.  */
  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  linux_supports_tracefork_flag = 1;
	  /* Reap the traced grandchild as well.  */
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning ("linux_test_for_tracefork: failed to kill second child");
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning ("linux_test_for_tracefork: unexpected result from waitpid "
	     "(%d, status 0x%x)", ret, status);

  /* Kill the child and loop until it is really gone (it may stop
     again before dying).  */
  do
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	warning ("linux_test_for_tracefork: failed to kill child");
      my_waitpid (child_pid, &status, 0);
    }
  while (WIFSTOPPED (status));

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  free (stack);
#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
}
3528
3529
2f2893d9
DJ
/* Hook called when the main symbol table is available: initialize
   libthread_db-based thread discovery for the current process, if it
   has not been done already.  */
static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  if (proc->private->thread_db != NULL)
    return;

  /* If the kernel supports tracing forks then it also supports tracing
     clones, and then we don't need to use the magic thread event breakpoint
     to learn about threads.  */
  thread_db_init (!linux_supports_tracefork_flag);
#endif
}
3545
e5379b03 3546static void
ef57601b 3547linux_request_interrupt (void)
e5379b03 3548{
a1928bad 3549 extern unsigned long signal_pid;
e5379b03 3550
95954743
PA
3551 if (!ptid_equal (cont_thread, null_ptid)
3552 && !ptid_equal (cont_thread, minus_one_ptid))
e5379b03 3553 {
54a0b537 3554 struct lwp_info *lwp;
bd99dc85 3555 int lwpid;
e5379b03 3556
54a0b537 3557 lwp = get_thread_lwp (current_inferior);
bd99dc85
PA
3558 lwpid = lwpid_of (lwp);
3559 kill_lwp (lwpid, SIGINT);
e5379b03
DJ
3560 }
3561 else
ef57601b 3562 kill_lwp (signal_pid, SIGINT);
e5379b03
DJ
3563}
3564
aa691b87
RM
3565/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
3566 to debugger memory starting at MYADDR. */
3567
3568static int
f450004a 3569linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
aa691b87
RM
3570{
3571 char filename[PATH_MAX];
3572 int fd, n;
95954743 3573 int pid = lwpid_of (get_thread_lwp (current_inferior));
aa691b87 3574
95954743 3575 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
3576
3577 fd = open (filename, O_RDONLY);
3578 if (fd < 0)
3579 return -1;
3580
3581 if (offset != (CORE_ADDR) 0
3582 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
3583 n = -1;
3584 else
3585 n = read (fd, myaddr, len);
3586
3587 close (fd);
3588
3589 return n;
3590}
3591
d993e290
PA
3592/* These breakpoint and watchpoint related wrapper functions simply
3593 pass on the function call if the target has registered a
3594 corresponding function. */
e013ee27
OF
3595
3596static int
d993e290 3597linux_insert_point (char type, CORE_ADDR addr, int len)
e013ee27 3598{
d993e290
PA
3599 if (the_low_target.insert_point != NULL)
3600 return the_low_target.insert_point (type, addr, len);
e013ee27
OF
3601 else
3602 /* Unsupported (see target.h). */
3603 return 1;
3604}
3605
3606static int
d993e290 3607linux_remove_point (char type, CORE_ADDR addr, int len)
e013ee27 3608{
d993e290
PA
3609 if (the_low_target.remove_point != NULL)
3610 return the_low_target.remove_point (type, addr, len);
e013ee27
OF
3611 else
3612 /* Unsupported (see target.h). */
3613 return 1;
3614}
3615
3616static int
3617linux_stopped_by_watchpoint (void)
3618{
c3adc08c
PA
3619 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3620
3621 return lwp->stopped_by_watchpoint;
e013ee27
OF
3622}
3623
3624static CORE_ADDR
3625linux_stopped_data_address (void)
3626{
c3adc08c
PA
3627 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3628
3629 return lwp->stopped_data_address;
e013ee27
OF
3630}
3631
42c81e2a 3632#if defined(__UCLIBC__) && defined(HAS_NOMMU)
52fb6437
NS
3633#if defined(__mcoldfire__)
3634/* These should really be defined in the kernel's ptrace.h header. */
3635#define PT_TEXT_ADDR 49*4
3636#define PT_DATA_ADDR 50*4
3637#define PT_TEXT_END_ADDR 51*4
3638#endif
3639
/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Reads the runtime text/data bases from the
   ptrace user area.  Returns 1 and sets *TEXT_P/*DATA_P on success,
   0 otherwise.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
  unsigned long text, text_end, data;
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
#endif
 return 0;
}
3675#endif
3676
dc146f7c
VP
/* qsort comparison callback for ints.  Returns negative, zero, or
   positive as *XA is less than, equal to, or greater than *XB.  The
   sign trick is used instead of `a - b' because that subtraction can
   overflow (undefined behavior) when the operands have opposite
   signs.  */
static int
compare_ints (const void *xa, const void *xb)
{
  int a = *(const int *) xa;
  int b = *(const int *) xb;

  return (a > b) - (a < b);
}
3685
/* Compact the sorted, non-empty range [B, E) in place, dropping
   adjacent duplicates (like std::unique).  Returns one past the last
   element kept.  */
static int *
unique (int *b, int *e)
{
  int *out = b;
  int *in;

  for (in = b + 1; in != e; in++)
    if (*out != *in)
      *++out = *in;

  return out + 1;
}
3695
3696/* Given PID, iterates over all threads in that process.
3697
3698 Information about each thread, in a format suitable for qXfer:osdata:thread
3699 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
3700 initialized, and the caller is responsible for finishing and appending '\0'
3701 to it.
3702
3703 The list of cores that threads are running on is assigned to *CORES, if it
3704 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
3705 should free *CORES. */
3706
3707static void
3708list_threads (int pid, struct buffer *buffer, char **cores)
3709{
3710 int count = 0;
3711 int allocated = 10;
3712 int *core_numbers = xmalloc (sizeof (int) * allocated);
3713 char pathname[128];
3714 DIR *dir;
3715 struct dirent *dp;
3716 struct stat statbuf;
3717
3718 sprintf (pathname, "/proc/%d/task", pid);
3719 if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
3720 {
3721 dir = opendir (pathname);
3722 if (!dir)
3723 {
3724 free (core_numbers);
3725 return;
3726 }
3727
3728 while ((dp = readdir (dir)) != NULL)
3729 {
3730 unsigned long lwp = strtoul (dp->d_name, NULL, 10);
3731
3732 if (lwp != 0)
3733 {
3734 unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));
3735
3736 if (core != -1)
3737 {
3738 char s[sizeof ("4294967295")];
3739 sprintf (s, "%u", core);
3740
3741 if (count == allocated)
3742 {
3743 allocated *= 2;
3744 core_numbers = realloc (core_numbers,
3745 sizeof (int) * allocated);
3746 }
3747 core_numbers[count++] = core;
3748 if (buffer)
3749 buffer_xml_printf (buffer,
3750 "<item>"
3751 "<column name=\"pid\">%d</column>"
3752 "<column name=\"tid\">%s</column>"
3753 "<column name=\"core\">%s</column>"
3754 "</item>", pid, dp->d_name, s);
3755 }
3756 else
3757 {
3758 if (buffer)
3759 buffer_xml_printf (buffer,
3760 "<item>"
3761 "<column name=\"pid\">%d</column>"
3762 "<column name=\"tid\">%s</column>"
3763 "</item>", pid, dp->d_name);
3764 }
3765 }
3766 }
3767 }
3768
3769 if (cores)
3770 {
3771 *cores = NULL;
3772 if (count > 0)
3773 {
3774 struct buffer buffer2;
3775 int *b;
3776 int *e;
3777 qsort (core_numbers, count, sizeof (int), compare_ints);
3778
3779 /* Remove duplicates. */
3780 b = core_numbers;
3781 e = unique (b, core_numbers + count);
3782
3783 buffer_init (&buffer2);
3784
3785 for (b = core_numbers; b != e; ++b)
3786 {
3787 char number[sizeof ("4294967295")];
3788 sprintf (number, "%u", *b);
3789 buffer_xml_printf (&buffer2, "%s%s",
3790 (b == core_numbers) ? "" : ",", number);
3791 }
3792 buffer_grow_str0 (&buffer2, "");
3793
3794 *cores = buffer_finish (&buffer2);
3795 }
3796 }
3797 free (core_numbers);
3798}
3799
/* Append one qXfer:osdata:processes <item> for process PID (owned by
   USERNAME) to BUFFER, including its command line and the set of
   cores its threads run on.  Does nothing if /proc/PID/cmdline cannot
   be read.  */
static void
show_process (int pid, const char *username, struct buffer *buffer)
{
  char pathname[128];
  FILE *f;
  char cmd[MAXPATHLEN + 1];
  size_t len;

  sprintf (pathname, "/proc/%d/cmdline", pid);

  f = fopen (pathname, "r");
  if (f == NULL)
    return;

  len = fread (cmd, 1, sizeof (cmd) - 1, f);
  if (len > 0)
    {
      char *cores = NULL;
      size_t i;

      /* The cmdline file separates arguments with NUL bytes; flatten
	 them into a single space-separated string.  */
      for (i = 0; i < len; i++)
	if (cmd[i] == '\0')
	  cmd[i] = ' ';
      cmd[len] = '\0';

      buffer_xml_printf (buffer,
			 "<item>"
			 "<column name=\"pid\">%d</column>"
			 "<column name=\"user\">%s</column>"
			 "<column name=\"command\">%s</column>",
			 pid,
			 username,
			 cmd);

      /* This only collects core numbers, and does not print threads.  */
      list_threads (pid, NULL, &cores);

      if (cores)
	{
	  buffer_xml_printf (buffer,
			     "<column name=\"cores\">%s</column>", cores);
	  free (cores);
	}

      buffer_xml_printf (buffer, "</item>");
    }
  fclose (f);
}
3845
07e059b5
VP
/* Handle qXfer:osdata reads for ANNEX "processes" or "threads".
   Returns the number of bytes copied into READBUF, 0 at end-of-data
   or for unsupported requests.  Writes are not supported.  */
static int
linux_qxfer_osdata (const char *annex,
		    unsigned char *readbuf, unsigned const char *writebuf,
		    CORE_ADDR offset, int len)
{
  /* We make the process list snapshot when the object starts to be
     read.  The snapshot lives in static state across the sequence of
     partial reads and is discarded once OFFSET reaches the end.  */
  static const char *buf;
  static long len_avail = -1;
  static struct buffer buffer;
  int processes = 0;
  int threads = 0;

  DIR *dirp;

  if (strcmp (annex, "processes") == 0)
    processes = 1;
  else if (strcmp (annex, "threads") == 0)
    threads = 1;
  else
    return 0;

  if (!readbuf || writebuf)
    return 0;

  if (offset == 0)
    {
      /* A read from offset 0 (re)builds the snapshot.  */
      if (len_avail != -1 && len_avail != 0)
	buffer_free (&buffer);
      len_avail = 0;
      buf = NULL;
      buffer_init (&buffer);
      if (processes)
	buffer_grow_str (&buffer, "<osdata type=\"processes\">");
      else if (threads)
	buffer_grow_str (&buffer, "<osdata type=\"threads\">");

      dirp = opendir ("/proc");
      if (dirp)
	{
	  struct dirent *dp;
	  while ((dp = readdir (dirp)) != NULL)
	    {
	      struct stat statbuf;
	      char procentry[sizeof ("/proc/4294967295")];

	      /* Only all-digit /proc entries are processes.  */
	      if (!isdigit (dp->d_name[0])
		  || strlen (dp->d_name) > sizeof ("4294967295") - 1)
		continue;

	      sprintf (procentry, "/proc/%s", dp->d_name);
	      if (stat (procentry, &statbuf) == 0
		  && S_ISDIR (statbuf.st_mode))
		{
		  int pid = (int) strtoul (dp->d_name, NULL, 10);

		  if (processes)
		    {
		      struct passwd *entry = getpwuid (statbuf.st_uid);
		      show_process (pid, entry ? entry->pw_name : "?", &buffer);
		    }
		  else if (threads)
		    {
		      list_threads (pid, &buffer, NULL);
		    }
		}
	    }

	  closedir (dirp);
	}
      buffer_grow_str0 (&buffer, "</osdata>\n");
      buf = buffer_finish (&buffer);
      len_avail = strlen (buf);
    }

  if (offset >= len_avail)
    {
      /* Done.  Get rid of the data.  */
      buffer_free (&buffer);
      buf = NULL;
      len_avail = 0;
      return 0;
    }

  if (len > len_avail - offset)
    len = len_avail - offset;
  memcpy (readbuf, buf + offset, len);

  return len;
}
3936
d0722149
DE
3937/* Convert a native/host siginfo object, into/from the siginfo in the
3938 layout of the inferiors' architecture. */
3939
3940static void
3941siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
3942{
3943 int done = 0;
3944
3945 if (the_low_target.siginfo_fixup != NULL)
3946 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
3947
3948 /* If there was no callback, or the callback didn't do anything,
3949 then just do a straight memcpy. */
3950 if (!done)
3951 {
3952 if (direction == 1)
3953 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3954 else
3955 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3956 }
3957}
3958
/* Implement the qXfer:siginfo target object: read or write the
   pending signal information of the current LWP via
   PTRACE_GETSIGINFO / PTRACE_SETSIGINFO, converting between the
   host's and the inferior's siginfo layout.  Returns the number of
   bytes transferred, or -1 on error.  */

static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  struct siginfo siginfo;
  /* Byte buffer holding the siginfo in the inferior's layout.  */
  char inf_siginfo[sizeof (struct siginfo)];

  if (current_inferior == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));

  if (debug_threads)
    fprintf (stderr, "%s siginfo for lwp %d.\n",
	     readbuf != NULL ? "Reading" : "Writing",
	     pid);

  if (offset > sizeof (siginfo))
    return -1;

  /* Fetch the current siginfo first even for writes, so a partial
     write only modifies the requested byte range.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the object size.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
4007
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.  Only async-signal-safe calls are made
   here.  */

static void
sigchld_handler (int signo)
{
  /* Preserve errno: this handler may interrupt code that examines
     errno after we return.  */
  int old_errno = errno;

  if (debug_threads)
    /* fprintf is not async-signal-safe, so call write directly.  */
    write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}
4026
/* The Linux target always supports non-stop mode.  */

static int
linux_supports_non_stop (void)
{
  const int supported = 1;

  return supported;
}
4032
/* Enable (ENABLE != 0) or disable async mode by creating or tearing
   down the event pipe and its event-loop registration.  Returns the
   previous setting.  */

static int
linux_async (int enable)
{
  /* Async mode is "on" exactly when the event pipe exists.  */
  int previous = (linux_event_pipe[0] != -1);

  if (debug_threads)
    fprintf (stderr, "linux_async (%d), previous=%d\n",
	     enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      /* Block SIGCHLD while the pipe is created or destroyed, so
	 sigchld_handler never observes a half-initialized pipe.  */
      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    fatal ("creating event pipe failed.");

	  /* Non-blocking so async_file_mark can never stall in a
	     signal handler.  */
	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
4080
/* Switch non-stop mode on or off.  This amounts to registering with
   or unregistering from the event loop, which linux_async does.  */

static int
linux_start_non_stop (int nonstop)
{
  linux_async (nonstop);

  return 0;
}
4088
/* The Linux target always supports debugging multiple processes.  */

static int
linux_supports_multi_process (void)
{
  const int supported = 1;

  return supported;
}
4094
efcbbd14
UW
4095
4096/* Enumerate spufs IDs for process PID. */
4097static int
4098spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4099{
4100 int pos = 0;
4101 int written = 0;
4102 char path[128];
4103 DIR *dir;
4104 struct dirent *entry;
4105
4106 sprintf (path, "/proc/%ld/fd", pid);
4107 dir = opendir (path);
4108 if (!dir)
4109 return -1;
4110
4111 rewinddir (dir);
4112 while ((entry = readdir (dir)) != NULL)
4113 {
4114 struct stat st;
4115 struct statfs stfs;
4116 int fd;
4117
4118 fd = atoi (entry->d_name);
4119 if (!fd)
4120 continue;
4121
4122 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4123 if (stat (path, &st) != 0)
4124 continue;
4125 if (!S_ISDIR (st.st_mode))
4126 continue;
4127
4128 if (statfs (path, &stfs) != 0)
4129 continue;
4130 if (stfs.f_type != SPUFS_MAGIC)
4131 continue;
4132
4133 if (pos >= offset && pos + 4 <= offset + len)
4134 {
4135 *(unsigned int *)(buf + pos - offset) = fd;
4136 written += 4;
4137 }
4138 pos += 4;
4139 }
4140
4141 closedir (dir);
4142 return written;
4143}
4144
4145/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4146 object type, using the /proc file system. */
4147static int
4148linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4149 unsigned const char *writebuf,
4150 CORE_ADDR offset, int len)
4151{
4152 long pid = lwpid_of (get_thread_lwp (current_inferior));
4153 char buf[128];
4154 int fd = 0;
4155 int ret = 0;
4156
4157 if (!writebuf && !readbuf)
4158 return -1;
4159
4160 if (!*annex)
4161 {
4162 if (!readbuf)
4163 return -1;
4164 else
4165 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4166 }
4167
4168 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4169 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4170 if (fd <= 0)
4171 return -1;
4172
4173 if (offset != 0
4174 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4175 {
4176 close (fd);
4177 return 0;
4178 }
4179
4180 if (writebuf)
4181 ret = write (fd, writebuf, (size_t) len);
4182 else
4183 ret = read (fd, readbuf, (size_t) len);
4184
4185 close (fd);
4186 return ret;
4187}
4188
dc146f7c
VP
4189static int
4190linux_core_of_thread (ptid_t ptid)
4191{
4192 char filename[sizeof ("/proc//task//stat")
4193 + 2 * 20 /* decimal digits for 2 numbers, max 2^64 bit each */
4194 + 1];
4195 FILE *f;
4196 char *content = NULL;
4197 char *p;
4198 char *ts = 0;
4199 int content_read = 0;
4200 int i;
4201 int core;
4202
4203 sprintf (filename, "/proc/%d/task/%ld/stat",
4204 ptid_get_pid (ptid), ptid_get_lwp (ptid));
4205 f = fopen (filename, "r");
4206 if (!f)
4207 return -1;
4208
4209 for (;;)
4210 {
4211 int n;
4212 content = realloc (content, content_read + 1024);
4213 n = fread (content + content_read, 1, 1024, f);
4214 content_read += n;
4215 if (n < 1024)
4216 {
4217 content[content_read] = '\0';
4218 break;
4219 }
4220 }
4221
4222 p = strchr (content, '(');
4223 p = strchr (p, ')') + 2; /* skip ")" and a whitespace. */
4224
4225 p = strtok_r (p, " ", &ts);
4226 for (i = 0; i != 36; ++i)
4227 p = strtok_r (NULL, " ", &ts);
4228
4229 if (sscanf (p, "%d", &core) == 0)
4230 core = -1;
4231
4232 free (content);
4233 fclose (f);
4234
4235 return core;
4236}
4237
1570b33e
L
4238static void
4239linux_process_qsupported (const char *query)
4240{
4241 if (the_low_target.process_qsupported != NULL)
4242 the_low_target.process_qsupported (query);
4243}
4244
219f2f23
PA
4245static int
4246linux_supports_tracepoints (void)
4247{
4248 if (*the_low_target.supports_tracepoints == NULL)
4249 return 0;
4250
4251 return (*the_low_target.supports_tracepoints) ();
4252}
4253
4254static CORE_ADDR
4255linux_read_pc (struct regcache *regcache)
4256{
4257 if (the_low_target.get_pc == NULL)
4258 return 0;
4259
4260 return (*the_low_target.get_pc) (regcache);
4261}
4262
4263static void
4264linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
4265{
4266 gdb_assert (the_low_target.set_pc != NULL);
4267
4268 (*the_low_target.set_pc) (regcache, pc);
4269}
4270
8336d594
PA
4271static int
4272linux_thread_stopped (struct thread_info *thread)
4273{
4274 return get_thread_lwp (thread)->stopped;
4275}
4276
/* This exposes stop-all-threads functionality to other modules.  */

static void
linux_pause_all (void)
{
  /* Delegates to stop_all_lwps; see its definition earlier in this
     file for the actual stopping mechanics.  */
  stop_all_lwps ();
}
4284
/* The Linux implementation of the target operations vector.  The
   initializer is positional -- entries must stay in exactly the order
   the target_ops structure declares them (see target.h) -- so
   operations that are unsupported in a given configuration are filled
   with NULL rather than omitted.  */

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
  /* Reading text/data load offsets only applies to uClinux (no-MMU)
     configurations.  */
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  linux_read_offsets,
#else
  NULL,
#endif
  /* TLS address lookup requires libthread_db support.  */
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_core_of_thread,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  linux_pause_all
};
4337
/* One-time signal setup: ignore the thread-library cancellation
   signal so it does not kill gdbserver itself.  Uses a proper (void)
   prototype; the old-style empty parameter list () declares a
   function with unspecified parameters.  */

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
4347
/* One-time initialization of the Linux low-level target: install the
   target vector and breakpoint data, set up signal handling, probe
   kernel ptrace capabilities, and register the SIGCHLD handler used
   for async mode.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  /* Tell the breakpoint module which software-breakpoint instruction
     this architecture uses.  */
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  /* NOTE(review): presumably probes which PTRACE fork-tracing options
     the running kernel supports -- confirm against its definition
     earlier in this file.  */
  linux_test_for_tracefork ();
#ifdef HAVE_LINUX_REGSETS
  /* Count the target regsets (the table is terminated by a negative
     size) and allocate one "disabled" flag per regset.  */
  for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
    ;
  disabled_regsets = xmalloc (num_regsets);
#endif

  /* Install the SIGCHLD handler; SA_RESTART lets interrupted syscalls
     restart where possible.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);
}