]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/gdbserver/linux-low.c
* linux-low.c (linux_wait_for_event_1): Move passing the signal to
[thirdparty/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
545587ee 2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
4c38e0a4 3 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
da6d8c04
DJ
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
a9762ec7 9 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
a9762ec7 18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
19
20#include "server.h"
58caa3dc 21#include "linux-low.h"
da6d8c04 22
58caa3dc 23#include <sys/wait.h>
da6d8c04
DJ
24#include <stdio.h>
25#include <sys/param.h>
da6d8c04 26#include <sys/ptrace.h>
da6d8c04
DJ
27#include <signal.h>
28#include <sys/ioctl.h>
29#include <fcntl.h>
d07c63e7 30#include <string.h>
0a30fbc4
DJ
31#include <stdlib.h>
32#include <unistd.h>
fa6a77dc 33#include <errno.h>
fd500816 34#include <sys/syscall.h>
f9387fc3 35#include <sched.h>
07e059b5
VP
36#include <ctype.h>
37#include <pwd.h>
38#include <sys/types.h>
39#include <dirent.h>
efcbbd14
UW
40#include <sys/stat.h>
41#include <sys/vfs.h>
1570b33e 42#include <sys/uio.h>
957f3f49
DE
43#ifndef ELFMAG0
44/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
45 then ELFMAG0 will have been defined. If it didn't get included by
46 gdb_proc_service.h then including it will likely introduce a duplicate
47 definition of elf_fpregset_t. */
48#include <elf.h>
49#endif
efcbbd14
UW
50
51#ifndef SPUFS_MAGIC
52#define SPUFS_MAGIC 0x23c9b64e
53#endif
da6d8c04 54
32ca6d61
DJ
55#ifndef PTRACE_GETSIGINFO
56# define PTRACE_GETSIGINFO 0x4202
57# define PTRACE_SETSIGINFO 0x4203
58#endif
59
fd462a61
DJ
60#ifndef O_LARGEFILE
61#define O_LARGEFILE 0
62#endif
63
24a09b5f
DJ
64/* If the system headers did not provide the constants, hard-code the normal
65 values. */
66#ifndef PTRACE_EVENT_FORK
67
68#define PTRACE_SETOPTIONS 0x4200
69#define PTRACE_GETEVENTMSG 0x4201
70
71/* options set using PTRACE_SETOPTIONS */
72#define PTRACE_O_TRACESYSGOOD 0x00000001
73#define PTRACE_O_TRACEFORK 0x00000002
74#define PTRACE_O_TRACEVFORK 0x00000004
75#define PTRACE_O_TRACECLONE 0x00000008
76#define PTRACE_O_TRACEEXEC 0x00000010
77#define PTRACE_O_TRACEVFORKDONE 0x00000020
78#define PTRACE_O_TRACEEXIT 0x00000040
79
80/* Wait extended result codes for the above trace options. */
81#define PTRACE_EVENT_FORK 1
82#define PTRACE_EVENT_VFORK 2
83#define PTRACE_EVENT_CLONE 3
84#define PTRACE_EVENT_EXEC 4
85#define PTRACE_EVENT_VFORK_DONE 5
86#define PTRACE_EVENT_EXIT 6
87
88#endif /* PTRACE_EVENT_FORK */
89
90/* We can't always assume that this flag is available, but all systems
91 with the ptrace event handlers also have __WALL, so it's safe to use
92 in some contexts. */
93#ifndef __WALL
94#define __WALL 0x40000000 /* Wait for any child. */
95#endif
96
ec8ebe72
DE
97#ifndef W_STOPCODE
98#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
99#endif
100
42c81e2a
DJ
101#ifdef __UCLIBC__
102#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
103#define HAS_NOMMU
104#endif
105#endif
106
24a09b5f
DJ
107/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
108 representation of the thread ID.
611cb4a5 109
54a0b537 110 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
95954743
PA
111 the same as the LWP ID.
112
113 ``all_processes'' is keyed by the "overall process ID", which
114 GNU/Linux calls tgid, "thread group ID". */
0d62e5e8 115
54a0b537 116struct inferior_list all_lwps;
0d62e5e8 117
24a09b5f
DJ
118/* A list of all unknown processes which receive stop signals. Some other
119 process will presumably claim each of these as forked children
120 momentarily. */
121
122struct inferior_list stopped_pids;
123
0d62e5e8
DJ
124/* FIXME this is a bit of a hack, and could be removed. */
125int stopping_threads;
126
127/* FIXME make into a target method? */
24a09b5f 128int using_threads = 1;
24a09b5f 129
95954743
PA
130/* This flag is true iff we've just created or attached to our first
131 inferior but it has not stopped yet. As soon as it does, we need
132 to call the low target's arch_setup callback. Doing this only on
133 the first inferior avoids reinializing the architecture on every
134 inferior, and avoids messing with the register caches of the
135 already running inferiors. NOTE: this assumes all inferiors under
136 control of gdbserver have the same architecture. */
d61ddec4
UW
137static int new_inferior;
138
2acc282a 139static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 140 int step, int signal, siginfo_t *info);
2bd7c093 141static void linux_resume (struct thread_resume *resume_info, size_t n);
54a0b537 142static void stop_all_lwps (void);
95954743 143static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
95954743 144static void *add_lwp (ptid_t ptid);
c35fafde 145static int linux_stopped_by_watchpoint (void);
95954743 146static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
dc146f7c 147static int linux_core_of_thread (ptid_t ptid);
d50171e4
PA
148static void proceed_all_lwps (void);
149static void unstop_all_lwps (struct lwp_info *except);
d50171e4
PA
150static int finish_step_over (struct lwp_info *lwp);
151static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
152static int kill_lwp (unsigned long lwpid, int signo);
1e7fc18c 153static void linux_enable_event_reporting (int pid);
d50171e4
PA
154
155/* True if the low target can hardware single-step. Such targets
156 don't need a BREAKPOINT_REINSERT_ADDR callback. */
157
158static int
159can_hardware_single_step (void)
160{
161 return (the_low_target.breakpoint_reinsert_addr == NULL);
162}
163
164/* True if the low target supports memory breakpoints. If so, we'll
165 have a GET_PC implementation. */
166
167static int
168supports_breakpoints (void)
169{
170 return (the_low_target.get_pc != NULL);
171}
0d62e5e8
DJ
172
173struct pending_signals
174{
175 int signal;
32ca6d61 176 siginfo_t info;
0d62e5e8
DJ
177 struct pending_signals *prev;
178};
611cb4a5 179
14ce3065
DE
180#define PTRACE_ARG3_TYPE void *
181#define PTRACE_ARG4_TYPE void *
c6ecbae5 182#define PTRACE_XFER_TYPE long
da6d8c04 183
58caa3dc 184#ifdef HAVE_LINUX_REGSETS
52fa2412
UW
185static char *disabled_regsets;
186static int num_regsets;
58caa3dc
DJ
187#endif
188
bd99dc85
PA
189/* The read/write ends of the pipe registered as waitable file in the
190 event loop. */
191static int linux_event_pipe[2] = { -1, -1 };
192
193/* True if we're currently in async mode. */
194#define target_is_async_p() (linux_event_pipe[0] != -1)
195
02fc4de7 196static void send_sigstop (struct lwp_info *lwp);
bd99dc85
PA
197static void wait_for_sigstop (struct inferior_list_entry *entry);
198
d0722149
DE
199/* Accepts an integer PID; Returns a string representing a file that
200 can be opened to get info for the child process.
201 Space for the result is malloc'd, caller must free. */
202
/* Accepts an integer PID; Returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd, caller must free.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *proc_path, *exec_path;

  proc_path = xmalloc (MAXPATHLEN);
  exec_path = xmalloc (MAXPATHLEN);
  memset (exec_path, 0, MAXPATHLEN);

  snprintf (proc_path, MAXPATHLEN, "/proc/%d/exe", pid);

  /* readlink does not NUL-terminate its output; read at most
     MAXPATHLEN - 1 bytes so the zeroed final byte always terminates
     the result.  (The old code read MAXPATHLEN bytes and could
     return an unterminated buffer.)  */
  if (readlink (proc_path, exec_path, MAXPATHLEN - 1) > 0)
    {
      free (proc_path);
      return exec_path;
    }
  else
    {
      /* Could not resolve the link; fall back to returning the
	 /proc path itself, as before.  */
      free (exec_path);
      return proc_path;
    }
}
224
225/* Return non-zero if HEADER is a 64-bit ELF file. */
226
/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  /* First verify the ELF magic bytes, then check the class byte.  */
  if (memcmp (header->e_ident, ELFMAG, SELFMAG) != 0)
    return 0;

  return header->e_ident[EI_CLASS] == ELFCLASS64;
}
236
237/* Return non-zero if FILE is a 64-bit ELF file,
238 zero if the file is not a 64-bit ELF file,
239 and -1 if the file is not accessible or doesn't exist. */
240
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd, result;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  /* A file too short to hold an ELF header is not a 64-bit ELF.  */
  if (read (fd, &header, sizeof (header)) == sizeof (header))
    result = elf_64_header_p (&header);
  else
    result = 0;

  close (fd);
  return result;
}
260
bd99dc85
PA
261static void
262delete_lwp (struct lwp_info *lwp)
263{
264 remove_thread (get_lwp_thread (lwp));
265 remove_inferior (&all_lwps, &lwp->head);
aa5ca48f 266 free (lwp->arch_private);
bd99dc85
PA
267 free (lwp);
268}
269
95954743
PA
270/* Add a process to the common process list, and set its private
271 data. */
272
273static struct process_info *
274linux_add_process (int pid, int attached)
275{
276 struct process_info *proc;
277
278 /* Is this the first process? If so, then set the arch. */
279 if (all_processes.head == NULL)
280 new_inferior = 1;
281
282 proc = add_process (pid, attached);
283 proc->private = xcalloc (1, sizeof (*proc->private));
284
aa5ca48f
DE
285 if (the_low_target.new_process != NULL)
286 proc->private->arch_private = the_low_target.new_process ();
287
95954743
PA
288 return proc;
289}
290
07d4f67e
DE
291/* Wrapper function for waitpid which handles EINTR, and emulates
292 __WALL for systems where that is not available. */
293
294static int
295my_waitpid (int pid, int *status, int flags)
296{
297 int ret, out_errno;
298
299 if (debug_threads)
300 fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);
301
302 if (flags & __WALL)
303 {
304 sigset_t block_mask, org_mask, wake_mask;
305 int wnohang;
306
307 wnohang = (flags & WNOHANG) != 0;
308 flags &= ~(__WALL | __WCLONE);
309 flags |= WNOHANG;
310
311 /* Block all signals while here. This avoids knowing about
312 LinuxThread's signals. */
313 sigfillset (&block_mask);
314 sigprocmask (SIG_BLOCK, &block_mask, &org_mask);
315
316 /* ... except during the sigsuspend below. */
317 sigemptyset (&wake_mask);
318
319 while (1)
320 {
321 /* Since all signals are blocked, there's no need to check
322 for EINTR here. */
323 ret = waitpid (pid, status, flags);
324 out_errno = errno;
325
326 if (ret == -1 && out_errno != ECHILD)
327 break;
328 else if (ret > 0)
329 break;
330
331 if (flags & __WCLONE)
332 {
333 /* We've tried both flavors now. If WNOHANG is set,
334 there's nothing else to do, just bail out. */
335 if (wnohang)
336 break;
337
338 if (debug_threads)
339 fprintf (stderr, "blocking\n");
340
341 /* Block waiting for signals. */
342 sigsuspend (&wake_mask);
343 }
344
345 flags ^= __WCLONE;
346 }
347
348 sigprocmask (SIG_SETMASK, &org_mask, NULL);
349 }
350 else
351 {
352 do
353 ret = waitpid (pid, status, flags);
354 while (ret == -1 && errno == EINTR);
355 out_errno = errno;
356 }
357
358 if (debug_threads)
359 fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
360 pid, flags, status ? *status : -1, ret);
361
362 errno = out_errno;
363 return ret;
364}
365
bd99dc85
PA
366/* Handle a GNU/Linux extended wait response. If we see a clone
367 event, we need to add the new LWP to our list (and not report the
368 trap to higher layers). */
0d62e5e8 369
24a09b5f 370static void
54a0b537 371handle_extended_wait (struct lwp_info *event_child, int wstat)
24a09b5f
DJ
372{
373 int event = wstat >> 16;
54a0b537 374 struct lwp_info *new_lwp;
24a09b5f
DJ
375
376 if (event == PTRACE_EVENT_CLONE)
377 {
95954743 378 ptid_t ptid;
24a09b5f 379 unsigned long new_pid;
836acd6d 380 int ret, status = W_STOPCODE (SIGSTOP);
24a09b5f 381
bd99dc85 382 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);
24a09b5f
DJ
383
384 /* If we haven't already seen the new PID stop, wait for it now. */
385 if (! pull_pid_from_list (&stopped_pids, new_pid))
386 {
387 /* The new child has a pending SIGSTOP. We can't affect it until it
388 hits the SIGSTOP, but we're already attached. */
389
97438e3f 390 ret = my_waitpid (new_pid, &status, __WALL);
24a09b5f
DJ
391
392 if (ret == -1)
393 perror_with_name ("waiting for new child");
394 else if (ret != new_pid)
395 warning ("wait returned unexpected PID %d", ret);
da5898ce 396 else if (!WIFSTOPPED (status))
24a09b5f
DJ
397 warning ("wait returned unexpected status 0x%x", status);
398 }
399
1e7fc18c 400 linux_enable_event_reporting (new_pid);
24a09b5f 401
95954743
PA
402 ptid = ptid_build (pid_of (event_child), new_pid, 0);
403 new_lwp = (struct lwp_info *) add_lwp (ptid);
404 add_thread (ptid, new_lwp);
24a09b5f 405
e27d73f6
DE
406 /* Either we're going to immediately resume the new thread
407 or leave it stopped. linux_resume_one_lwp is a nop if it
408 thinks the thread is currently running, so set this first
409 before calling linux_resume_one_lwp. */
410 new_lwp->stopped = 1;
411
da5898ce
DJ
412 /* Normally we will get the pending SIGSTOP. But in some cases
413 we might get another signal delivered to the group first.
f21cc1a2 414 If we do get another signal, be sure not to lose it. */
da5898ce
DJ
415 if (WSTOPSIG (status) == SIGSTOP)
416 {
d50171e4
PA
417 if (stopping_threads)
418 new_lwp->stop_pc = get_stop_pc (new_lwp);
419 else
e27d73f6 420 linux_resume_one_lwp (new_lwp, 0, 0, NULL);
da5898ce 421 }
24a09b5f 422 else
da5898ce 423 {
54a0b537 424 new_lwp->stop_expected = 1;
d50171e4 425
da5898ce
DJ
426 if (stopping_threads)
427 {
d50171e4 428 new_lwp->stop_pc = get_stop_pc (new_lwp);
54a0b537
PA
429 new_lwp->status_pending_p = 1;
430 new_lwp->status_pending = status;
da5898ce
DJ
431 }
432 else
433 /* Pass the signal on. This is what GDB does - except
434 shouldn't we really report it instead? */
e27d73f6 435 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
da5898ce 436 }
24a09b5f
DJ
437
438 /* Always resume the current thread. If we are stopping
439 threads, it will have a pending SIGSTOP; we may as well
440 collect it now. */
2acc282a 441 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
24a09b5f
DJ
442 }
443}
444
d50171e4
PA
445/* Return the PC as read from the regcache of LWP, without any
446 adjustment. */
447
448static CORE_ADDR
449get_pc (struct lwp_info *lwp)
450{
451 struct thread_info *saved_inferior;
452 struct regcache *regcache;
453 CORE_ADDR pc;
454
455 if (the_low_target.get_pc == NULL)
456 return 0;
457
458 saved_inferior = current_inferior;
459 current_inferior = get_lwp_thread (lwp);
460
461 regcache = get_thread_regcache (current_inferior, 1);
462 pc = (*the_low_target.get_pc) (regcache);
463
464 if (debug_threads)
465 fprintf (stderr, "pc is 0x%lx\n", (long) pc);
466
467 current_inferior = saved_inferior;
468 return pc;
469}
470
471/* This function should only be called if LWP got a SIGTRAP.
0d62e5e8
DJ
472 The SIGTRAP could mean several things.
473
474 On i386, where decr_pc_after_break is non-zero:
475 If we were single-stepping this process using PTRACE_SINGLESTEP,
476 we will get only the one SIGTRAP (even if the instruction we
477 stepped over was a breakpoint). The value of $eip will be the
478 next instruction.
479 If we continue the process using PTRACE_CONT, we will get a
480 SIGTRAP when we hit a breakpoint. The value of $eip will be
481 the instruction after the breakpoint (i.e. needs to be
482 decremented). If we report the SIGTRAP to GDB, we must also
483 report the undecremented PC. If we cancel the SIGTRAP, we
484 must resume at the decremented PC.
485
486 (Presumably, not yet tested) On a non-decr_pc_after_break machine
487 with hardware or kernel single-step:
488 If we single-step over a breakpoint instruction, our PC will
489 point at the following instruction. If we continue and hit a
490 breakpoint instruction, our PC will point at the breakpoint
491 instruction. */
492
493static CORE_ADDR
d50171e4 494get_stop_pc (struct lwp_info *lwp)
0d62e5e8 495{
d50171e4
PA
496 CORE_ADDR stop_pc;
497
498 if (the_low_target.get_pc == NULL)
499 return 0;
0d62e5e8 500
d50171e4
PA
501 stop_pc = get_pc (lwp);
502
bdabb078
PA
503 if (WSTOPSIG (lwp->last_status) == SIGTRAP
504 && !lwp->stepping
505 && !lwp->stopped_by_watchpoint
506 && lwp->last_status >> 16 == 0)
47c0c975
DE
507 stop_pc -= the_low_target.decr_pc_after_break;
508
509 if (debug_threads)
510 fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
511
512 return stop_pc;
0d62e5e8 513}
ce3a066d 514
0d62e5e8 515static void *
95954743 516add_lwp (ptid_t ptid)
611cb4a5 517{
54a0b537 518 struct lwp_info *lwp;
0d62e5e8 519
54a0b537
PA
520 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
521 memset (lwp, 0, sizeof (*lwp));
0d62e5e8 522
95954743 523 lwp->head.id = ptid;
0d62e5e8 524
aa5ca48f
DE
525 if (the_low_target.new_thread != NULL)
526 lwp->arch_private = the_low_target.new_thread ();
527
54a0b537 528 add_inferior_to_list (&all_lwps, &lwp->head);
0d62e5e8 529
54a0b537 530 return lwp;
0d62e5e8 531}
611cb4a5 532
da6d8c04
DJ
533/* Start an inferior process and returns its pid.
534 ALLARGS is a vector of program-name and args. */
535
ce3a066d
DJ
536static int
537linux_create_inferior (char *program, char **allargs)
da6d8c04 538{
a6dbe5df 539 struct lwp_info *new_lwp;
da6d8c04 540 int pid;
95954743 541 ptid_t ptid;
da6d8c04 542
42c81e2a 543#if defined(__UCLIBC__) && defined(HAS_NOMMU)
52fb6437
NS
544 pid = vfork ();
545#else
da6d8c04 546 pid = fork ();
52fb6437 547#endif
da6d8c04
DJ
548 if (pid < 0)
549 perror_with_name ("fork");
550
551 if (pid == 0)
552 {
553 ptrace (PTRACE_TRACEME, 0, 0, 0);
554
60c3d7b0 555#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
254787d4 556 signal (__SIGRTMIN + 1, SIG_DFL);
60c3d7b0 557#endif
0d62e5e8 558
a9fa9f7d
DJ
559 setpgid (0, 0);
560
2b876972
DJ
561 execv (program, allargs);
562 if (errno == ENOENT)
563 execvp (program, allargs);
da6d8c04
DJ
564
565 fprintf (stderr, "Cannot exec %s: %s.\n", program,
d07c63e7 566 strerror (errno));
da6d8c04
DJ
567 fflush (stderr);
568 _exit (0177);
569 }
570
95954743
PA
571 linux_add_process (pid, 0);
572
573 ptid = ptid_build (pid, pid, 0);
574 new_lwp = add_lwp (ptid);
575 add_thread (ptid, new_lwp);
a6dbe5df 576 new_lwp->must_set_ptrace_flags = 1;
611cb4a5 577
a9fa9f7d 578 return pid;
da6d8c04
DJ
579}
580
581/* Attach to an inferior process. */
582
95954743
PA
583static void
584linux_attach_lwp_1 (unsigned long lwpid, int initial)
da6d8c04 585{
95954743 586 ptid_t ptid;
54a0b537 587 struct lwp_info *new_lwp;
611cb4a5 588
95954743 589 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
da6d8c04 590 {
95954743 591 if (!initial)
2d717e4f
DJ
592 {
593 /* If we fail to attach to an LWP, just warn. */
95954743 594 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
2d717e4f
DJ
595 strerror (errno), errno);
596 fflush (stderr);
597 return;
598 }
599 else
600 /* If we fail to attach to a process, report an error. */
95954743 601 error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
43d5792c 602 strerror (errno), errno);
da6d8c04
DJ
603 }
604
95954743
PA
605 if (initial)
606 /* NOTE/FIXME: This lwp might have not been the tgid. */
607 ptid = ptid_build (lwpid, lwpid, 0);
608 else
609 {
610 /* Note that extracting the pid from the current inferior is
611 safe, since we're always called in the context of the same
612 process as this new thread. */
613 int pid = pid_of (get_thread_lwp (current_inferior));
614 ptid = ptid_build (pid, lwpid, 0);
615 }
24a09b5f 616
95954743
PA
617 new_lwp = (struct lwp_info *) add_lwp (ptid);
618 add_thread (ptid, new_lwp);
0d62e5e8 619
a6dbe5df
PA
620 /* We need to wait for SIGSTOP before being able to make the next
621 ptrace call on this LWP. */
622 new_lwp->must_set_ptrace_flags = 1;
623
0d62e5e8 624 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
0e21c1ec
DE
625 brings it to a halt.
626
627 There are several cases to consider here:
628
629 1) gdbserver has already attached to the process and is being notified
1b3f6016 630 of a new thread that is being created.
d50171e4
PA
631 In this case we should ignore that SIGSTOP and resume the
632 process. This is handled below by setting stop_expected = 1,
8336d594 633 and the fact that add_thread sets last_resume_kind ==
d50171e4 634 resume_continue.
0e21c1ec
DE
635
636 2) This is the first thread (the process thread), and we're attaching
1b3f6016
PA
637 to it via attach_inferior.
638 In this case we want the process thread to stop.
d50171e4
PA
639 This is handled by having linux_attach set last_resume_kind ==
640 resume_stop after we return.
1b3f6016
PA
641 ??? If the process already has several threads we leave the other
642 threads running.
0e21c1ec
DE
643
644 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1b3f6016
PA
645 existing threads.
646 In this case we want the thread to stop.
647 FIXME: This case is currently not properly handled.
648 We should wait for the SIGSTOP but don't. Things work apparently
649 because enough time passes between when we ptrace (ATTACH) and when
650 gdb makes the next ptrace call on the thread.
0d62e5e8
DJ
651
652 On the other hand, if we are currently trying to stop all threads, we
653 should treat the new thread as if we had sent it a SIGSTOP. This works
54a0b537 654 because we are guaranteed that the add_lwp call above added us to the
0e21c1ec
DE
655 end of the list, and so the new thread has not yet reached
656 wait_for_sigstop (but will). */
d50171e4 657 new_lwp->stop_expected = 1;
0d62e5e8
DJ
658}
659
95954743
PA
/* Public entry point: attach to an additional (non-initial) LWP.  */

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}
665
0d62e5e8 666int
a1928bad 667linux_attach (unsigned long pid)
0d62e5e8 668{
95954743 669 linux_attach_lwp_1 (pid, 1);
95954743 670 linux_add_process (pid, 1);
0d62e5e8 671
bd99dc85
PA
672 if (!non_stop)
673 {
8336d594
PA
674 struct thread_info *thread;
675
676 /* Don't ignore the initial SIGSTOP if we just attached to this
677 process. It will be collected by wait shortly. */
678 thread = find_thread_ptid (ptid_build (pid, pid, 0));
679 thread->last_resume_kind = resume_stop;
bd99dc85 680 }
0d62e5e8 681
95954743
PA
682 return 0;
683}
684
685struct counter
686{
687 int pid;
688 int count;
689};
690
691static int
692second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
693{
694 struct counter *counter = args;
695
696 if (ptid_get_pid (entry->id) == counter->pid)
697 {
698 if (++counter->count > 1)
699 return 1;
700 }
d61ddec4 701
da6d8c04
DJ
702 return 0;
703}
704
95954743
PA
705static int
706last_thread_of_process_p (struct thread_info *thread)
707{
708 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
709 int pid = ptid_get_pid (ptid);
710 struct counter counter = { pid , 0 };
da6d8c04 711
95954743
PA
712 return (find_inferior (&all_threads,
713 second_thread_of_pid_p, &counter) == NULL);
714}
715
716/* Kill the inferior lwp. */
717
718static int
719linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
da6d8c04 720{
0d62e5e8 721 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 722 struct lwp_info *lwp = get_thread_lwp (thread);
0d62e5e8 723 int wstat;
95954743
PA
724 int pid = * (int *) args;
725
726 if (ptid_get_pid (entry->id) != pid)
727 return 0;
0d62e5e8 728
fd500816
DJ
729 /* We avoid killing the first thread here, because of a Linux kernel (at
730 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
731 the children get a chance to be reaped, it will remain a zombie
732 forever. */
95954743 733
12b42a12 734 if (lwpid_of (lwp) == pid)
95954743
PA
735 {
736 if (debug_threads)
737 fprintf (stderr, "lkop: is last of process %s\n",
738 target_pid_to_str (entry->id));
739 return 0;
740 }
fd500816 741
0d62e5e8
DJ
742 do
743 {
bd99dc85 744 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
0d62e5e8
DJ
745
746 /* Make sure it died. The loop is most likely unnecessary. */
95954743 747 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
bd99dc85 748 } while (pid > 0 && WIFSTOPPED (wstat));
95954743
PA
749
750 return 0;
da6d8c04
DJ
751}
752
95954743
PA
753static int
754linux_kill (int pid)
0d62e5e8 755{
95954743 756 struct process_info *process;
54a0b537 757 struct lwp_info *lwp;
95954743 758 struct thread_info *thread;
fd500816 759 int wstat;
95954743 760 int lwpid;
fd500816 761
95954743
PA
762 process = find_process_pid (pid);
763 if (process == NULL)
764 return -1;
9d606399 765
f9e39928
PA
766 /* If we're killing a running inferior, make sure it is stopped
767 first, as PTRACE_KILL will not work otherwise. */
768 stop_all_lwps ();
769
95954743 770 find_inferior (&all_threads, linux_kill_one_lwp, &pid);
fd500816 771
54a0b537 772 /* See the comment in linux_kill_one_lwp. We did not kill the first
fd500816 773 thread in the list, so do so now. */
95954743
PA
774 lwp = find_lwp_pid (pid_to_ptid (pid));
775 thread = get_lwp_thread (lwp);
bd99dc85
PA
776
777 if (debug_threads)
95954743
PA
778 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
779 lwpid_of (lwp), pid);
bd99dc85 780
fd500816
DJ
781 do
782 {
bd99dc85 783 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
fd500816
DJ
784
785 /* Make sure it died. The loop is most likely unnecessary. */
95954743
PA
786 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
787 } while (lwpid > 0 && WIFSTOPPED (wstat));
2d717e4f 788
8336d594 789 the_target->mourn (process);
f9e39928
PA
790
791 /* Since we presently can only stop all lwps of all processes, we
792 need to unstop lwps of other processes. */
793 unstop_all_lwps (NULL);
95954743 794 return 0;
0d62e5e8
DJ
795}
796
95954743
PA
797static int
798linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
6ad8ae5c
DJ
799{
800 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 801 struct lwp_info *lwp = get_thread_lwp (thread);
95954743
PA
802 int pid = * (int *) args;
803
804 if (ptid_get_pid (entry->id) != pid)
805 return 0;
6ad8ae5c 806
ae13219e
DJ
807 /* If this process is stopped but is expecting a SIGSTOP, then make
808 sure we take care of that now. This isn't absolutely guaranteed
809 to collect the SIGSTOP, but is fairly likely to. */
54a0b537 810 if (lwp->stop_expected)
ae13219e 811 {
bd99dc85 812 int wstat;
ae13219e 813 /* Clear stop_expected, so that the SIGSTOP will be reported. */
54a0b537 814 lwp->stop_expected = 0;
f9e39928 815 linux_resume_one_lwp (lwp, 0, 0, NULL);
95954743 816 linux_wait_for_event (lwp->head.id, &wstat, __WALL);
ae13219e
DJ
817 }
818
819 /* Flush any pending changes to the process's registers. */
820 regcache_invalidate_one ((struct inferior_list_entry *)
54a0b537 821 get_lwp_thread (lwp));
ae13219e
DJ
822
823 /* Finally, let it resume. */
bd99dc85
PA
824 ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);
825
826 delete_lwp (lwp);
95954743 827 return 0;
6ad8ae5c
DJ
828}
829
95954743
PA
830static int
831linux_detach (int pid)
832{
833 struct process_info *process;
834
835 process = find_process_pid (pid);
836 if (process == NULL)
837 return -1;
838
f9e39928
PA
839 /* Stop all threads before detaching. First, ptrace requires that
840 the thread is stopped to sucessfully detach. Second, thread_db
841 may need to uninstall thread event breakpoints from memory, which
842 only works with a stopped process anyway. */
843 stop_all_lwps ();
844
ca5c370d 845#ifdef USE_THREAD_DB
8336d594 846 thread_db_detach (process);
ca5c370d
PA
847#endif
848
95954743 849 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
8336d594
PA
850
851 the_target->mourn (process);
f9e39928
PA
852
853 /* Since we presently can only stop all lwps of all processes, we
854 need to unstop lwps of other processes. */
855 unstop_all_lwps (NULL);
856 return 0;
857}
858
859/* Remove all LWPs that belong to process PROC from the lwp list. */
860
static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  /* Drop the LWP only if it belongs to PROCESS.  */
  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}
872
8336d594
PA
873static void
874linux_mourn (struct process_info *process)
875{
876 struct process_info_private *priv;
877
878#ifdef USE_THREAD_DB
879 thread_db_mourn (process);
880#endif
881
f9e39928
PA
882 find_inferior (&all_lwps, delete_lwp_callback, process);
883
8336d594
PA
884 /* Freeing all private data. */
885 priv = process->private;
886 free (priv->arch_private);
887 free (priv);
888 process->private = NULL;
505106cd
PA
889
890 remove_process (process);
8336d594
PA
891}
892
444d6139 893static void
95954743 894linux_join (int pid)
444d6139 895{
444d6139 896 int status, ret;
95954743 897 struct process_info *process;
bd99dc85 898
95954743
PA
899 process = find_process_pid (pid);
900 if (process == NULL)
901 return;
444d6139
PA
902
903 do {
95954743 904 ret = my_waitpid (pid, &status, 0);
444d6139
PA
905 if (WIFEXITED (status) || WIFSIGNALED (status))
906 break;
907 } while (ret != -1 || errno != ECHILD);
908}
909
6ad8ae5c 910/* Return nonzero if the given thread is still alive. */
0d62e5e8 911static int
95954743 912linux_thread_alive (ptid_t ptid)
0d62e5e8 913{
95954743
PA
914 struct lwp_info *lwp = find_lwp_pid (ptid);
915
916 /* We assume we always know if a thread exits. If a whole process
917 exited but we still haven't been able to report it to GDB, we'll
918 hold on to the last lwp of the dead process. */
919 if (lwp != NULL)
920 return !lwp->dead;
0d62e5e8
DJ
921 else
922 return 0;
923}
924
6bf5e0ba 925/* Return 1 if this lwp has an interesting status pending. */
611cb4a5 926static int
d50171e4 927status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
0d62e5e8 928{
54a0b537 929 struct lwp_info *lwp = (struct lwp_info *) entry;
95954743 930 ptid_t ptid = * (ptid_t *) arg;
d50171e4 931 struct thread_info *thread = get_lwp_thread (lwp);
95954743
PA
932
933 /* Check if we're only interested in events from a specific process
934 or its lwps. */
935 if (!ptid_equal (minus_one_ptid, ptid)
936 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
937 return 0;
0d62e5e8 938
d50171e4
PA
939 thread = get_lwp_thread (lwp);
940
941 /* If we got a `vCont;t', but we haven't reported a stop yet, do
942 report any status pending the LWP may have. */
8336d594 943 if (thread->last_resume_kind == resume_stop
d50171e4
PA
944 && thread->last_status.kind == TARGET_WAITKIND_STOPPED)
945 return 0;
0d62e5e8 946
d50171e4 947 return lwp->status_pending_p;
0d62e5e8
DJ
948}
949
95954743
PA
950static int
951same_lwp (struct inferior_list_entry *entry, void *data)
952{
953 ptid_t ptid = *(ptid_t *) data;
954 int lwp;
955
956 if (ptid_get_lwp (ptid) != 0)
957 lwp = ptid_get_lwp (ptid);
958 else
959 lwp = ptid_get_pid (ptid);
960
961 if (ptid_get_lwp (entry->id) == lwp)
962 return 1;
963
964 return 0;
965}
966
967struct lwp_info *
968find_lwp_pid (ptid_t ptid)
969{
970 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
971}
972
bd99dc85 973static struct lwp_info *
95954743 974linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
611cb4a5 975{
0d62e5e8 976 int ret;
95954743 977 int to_wait_for = -1;
bd99dc85 978 struct lwp_info *child = NULL;
0d62e5e8 979
bd99dc85 980 if (debug_threads)
95954743
PA
981 fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
982
983 if (ptid_equal (ptid, minus_one_ptid))
984 to_wait_for = -1; /* any child */
985 else
986 to_wait_for = ptid_get_lwp (ptid); /* this lwp only */
0d62e5e8 987
bd99dc85 988 options |= __WALL;
0d62e5e8 989
bd99dc85 990retry:
0d62e5e8 991
bd99dc85
PA
992 ret = my_waitpid (to_wait_for, wstatp, options);
993 if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
994 return NULL;
995 else if (ret == -1)
996 perror_with_name ("waitpid");
0d62e5e8
DJ
997
998 if (debug_threads
999 && (!WIFSTOPPED (*wstatp)
1000 || (WSTOPSIG (*wstatp) != 32
1001 && WSTOPSIG (*wstatp) != 33)))
1002 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
1003
95954743 1004 child = find_lwp_pid (pid_to_ptid (ret));
0d62e5e8 1005
24a09b5f
DJ
1006 /* If we didn't find a process, one of two things presumably happened:
1007 - A process we started and then detached from has exited. Ignore it.
1008 - A process we are controlling has forked and the new child's stop
1009 was reported to us by the kernel. Save its PID. */
bd99dc85 1010 if (child == NULL && WIFSTOPPED (*wstatp))
24a09b5f
DJ
1011 {
1012 add_pid_to_list (&stopped_pids, ret);
1013 goto retry;
1014 }
bd99dc85 1015 else if (child == NULL)
24a09b5f
DJ
1016 goto retry;
1017
bd99dc85 1018 child->stopped = 1;
0d62e5e8 1019
bd99dc85 1020 child->last_status = *wstatp;
32ca6d61 1021
d61ddec4
UW
1022 /* Architecture-specific setup after inferior is running.
1023 This needs to happen after we have attached to the inferior
1024 and it is stopped for the first time, but before we access
1025 any inferior registers. */
1026 if (new_inferior)
1027 {
1028 the_low_target.arch_setup ();
52fa2412
UW
1029#ifdef HAVE_LINUX_REGSETS
1030 memset (disabled_regsets, 0, num_regsets);
1031#endif
d61ddec4
UW
1032 new_inferior = 0;
1033 }
1034
c3adc08c
PA
1035 /* Fetch the possibly triggered data watchpoint info and store it in
1036 CHILD.
1037
1038 On some archs, like x86, that use debug registers to set
1039 watchpoints, it's possible that the way to know which watched
1040 address trapped, is to check the register that is used to select
1041 which address to watch. Problem is, between setting the
1042 watchpoint and reading back which data address trapped, the user
1043 may change the set of watchpoints, and, as a consequence, GDB
1044 changes the debug registers in the inferior. To avoid reading
1045 back a stale stopped-data-address when that happens, we cache in
1046 LP the fact that a watchpoint trapped, and the corresponding data
1047 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1048 changes the debug registers meanwhile, we have the cached data we
1049 can rely on. */
1050
1051 if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
1052 {
1053 if (the_low_target.stopped_by_watchpoint == NULL)
1054 {
1055 child->stopped_by_watchpoint = 0;
1056 }
1057 else
1058 {
1059 struct thread_info *saved_inferior;
1060
1061 saved_inferior = current_inferior;
1062 current_inferior = get_lwp_thread (child);
1063
1064 child->stopped_by_watchpoint
1065 = the_low_target.stopped_by_watchpoint ();
1066
1067 if (child->stopped_by_watchpoint)
1068 {
1069 if (the_low_target.stopped_data_address != NULL)
1070 child->stopped_data_address
1071 = the_low_target.stopped_data_address ();
1072 else
1073 child->stopped_data_address = 0;
1074 }
1075
1076 current_inferior = saved_inferior;
1077 }
1078 }
1079
d50171e4
PA
1080 /* Store the STOP_PC, with adjustment applied. This depends on the
1081 architecture being defined already (so that CHILD has a valid
1082 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1083 not). */
1084 if (WIFSTOPPED (*wstatp))
1085 child->stop_pc = get_stop_pc (child);
1086
0d62e5e8 1087 if (debug_threads
47c0c975
DE
1088 && WIFSTOPPED (*wstatp)
1089 && the_low_target.get_pc != NULL)
0d62e5e8 1090 {
896c7fbb 1091 struct thread_info *saved_inferior = current_inferior;
bce522a2 1092 struct regcache *regcache;
47c0c975
DE
1093 CORE_ADDR pc;
1094
d50171e4 1095 current_inferior = get_lwp_thread (child);
bce522a2 1096 regcache = get_thread_regcache (current_inferior, 1);
442ea881 1097 pc = (*the_low_target.get_pc) (regcache);
47c0c975 1098 fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
896c7fbb 1099 current_inferior = saved_inferior;
0d62e5e8 1100 }
bd99dc85
PA
1101
1102 return child;
0d62e5e8 1103}
611cb4a5 1104
219f2f23
PA
1105/* This function should only be called if the LWP got a SIGTRAP.
1106
1107 Handle any tracepoint steps or hits. Return true if a tracepoint
1108 event was handled, 0 otherwise. */
1109
1110static int
1111handle_tracepoints (struct lwp_info *lwp)
1112{
1113 struct thread_info *tinfo = get_lwp_thread (lwp);
1114 int tpoint_related_event = 0;
1115
1116 /* And we need to be sure that any all-threads-stopping doesn't try
1117 to move threads out of the jump pads, as it could deadlock the
1118 inferior (LWP could be in the jump pad, maybe even holding the
1119 lock.) */
1120
1121 /* Do any necessary step collect actions. */
1122 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1123
1124 /* See if we just hit a tracepoint and do its main collect
1125 actions. */
1126 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1127
1128 if (tpoint_related_event)
1129 {
1130 if (debug_threads)
1131 fprintf (stderr, "got a tracepoint event\n");
1132 return 1;
1133 }
1134
1135 return 0;
1136}
1137
d50171e4
PA
/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.

   Returns 1 if a breakpoint was found at LWP's stop_pc (and the PC
   was pushed back to it), 0 otherwise.  Note LWP->stop_pc already has
   the decr_pc_after_break adjustment applied (see get_stop_pc use in
   linux_wait_for_lwp).  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
	fprintf (stderr,
		 "CB: Push back breakpoint for %s\n",
		 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_inferior, 1);
	  (*the_low_target.set_pc) (regcache, lwp->stop_pc);
	}

      /* Restore before returning; current_inferior still points at
	 LWP's thread here.  */
      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
	fprintf (stderr,
		 "CB: No breakpoint found at %s for [%s]\n",
		 paddress (lwp->stop_pc),
		 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}
1186
1187/* When the event-loop is doing a step-over, this points at the thread
1188 being stepped. */
1189ptid_t step_over_bkpt;
1190
bd99dc85
PA
1191/* Wait for an event from child PID. If PID is -1, wait for any
1192 child. Store the stop status through the status pointer WSTAT.
1193 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1194 event was found and OPTIONS contains WNOHANG. Return the PID of
1195 the stopped child otherwise. */
1196
0d62e5e8 1197static int
95954743 1198linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
0d62e5e8 1199{
d50171e4
PA
1200 struct lwp_info *event_child, *requested_child;
1201
d50171e4
PA
1202 event_child = NULL;
1203 requested_child = NULL;
0d62e5e8 1204
95954743 1205 /* Check for a lwp with a pending status. */
bd99dc85 1206
95954743
PA
1207 if (ptid_equal (ptid, minus_one_ptid)
1208 || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
0d62e5e8 1209 {
54a0b537 1210 event_child = (struct lwp_info *)
d50171e4 1211 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
0d62e5e8 1212 if (debug_threads && event_child)
bd99dc85 1213 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
0d62e5e8
DJ
1214 }
1215 else
1216 {
95954743 1217 requested_child = find_lwp_pid (ptid);
d50171e4
PA
1218
1219 if (requested_child->status_pending_p)
bd99dc85 1220 event_child = requested_child;
0d62e5e8 1221 }
611cb4a5 1222
0d62e5e8
DJ
1223 if (event_child != NULL)
1224 {
bd99dc85
PA
1225 if (debug_threads)
1226 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1227 lwpid_of (event_child), event_child->status_pending);
1228 *wstat = event_child->status_pending;
1229 event_child->status_pending_p = 0;
1230 event_child->status_pending = 0;
1231 current_inferior = get_lwp_thread (event_child);
1232 return lwpid_of (event_child);
0d62e5e8
DJ
1233 }
1234
1235 /* We only enter this loop if no process has a pending wait status. Thus
1236 any action taken in response to a wait status inside this loop is
1237 responding as soon as we detect the status, not after any pending
1238 events. */
1239 while (1)
1240 {
6bf5e0ba 1241 event_child = linux_wait_for_lwp (ptid, wstat, options);
0d62e5e8 1242
bd99dc85 1243 if ((options & WNOHANG) && event_child == NULL)
d50171e4
PA
1244 {
1245 if (debug_threads)
1246 fprintf (stderr, "WNOHANG set, no event found\n");
1247 return 0;
1248 }
0d62e5e8
DJ
1249
1250 if (event_child == NULL)
1251 error ("event from unknown child");
611cb4a5 1252
bd99dc85 1253 current_inferior = get_lwp_thread (event_child);
0d62e5e8 1254
89be2091 1255 /* Check for thread exit. */
bd99dc85 1256 if (! WIFSTOPPED (*wstat))
0d62e5e8 1257 {
89be2091 1258 if (debug_threads)
95954743 1259 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
89be2091
DJ
1260
1261 /* If the last thread is exiting, just return. */
95954743 1262 if (last_thread_of_process_p (current_inferior))
bd99dc85
PA
1263 {
1264 if (debug_threads)
95954743
PA
1265 fprintf (stderr, "LWP %ld is last lwp of process\n",
1266 lwpid_of (event_child));
bd99dc85
PA
1267 return lwpid_of (event_child);
1268 }
89be2091 1269
bd99dc85
PA
1270 if (!non_stop)
1271 {
1272 current_inferior = (struct thread_info *) all_threads.head;
1273 if (debug_threads)
1274 fprintf (stderr, "Current inferior is now %ld\n",
1275 lwpid_of (get_thread_lwp (current_inferior)));
1276 }
1277 else
1278 {
1279 current_inferior = NULL;
1280 if (debug_threads)
1281 fprintf (stderr, "Current inferior is now <NULL>\n");
1282 }
89be2091
DJ
1283
1284 /* If we were waiting for this particular child to do something...
1285 well, it did something. */
bd99dc85 1286 if (requested_child != NULL)
d50171e4
PA
1287 {
1288 int lwpid = lwpid_of (event_child);
1289
1290 /* Cancel the step-over operation --- the thread that
1291 started it is gone. */
1292 if (finish_step_over (event_child))
1293 unstop_all_lwps (event_child);
1294 delete_lwp (event_child);
1295 return lwpid;
1296 }
1297
1298 delete_lwp (event_child);
89be2091
DJ
1299
1300 /* Wait for a more interesting event. */
1301 continue;
1302 }
1303
a6dbe5df
PA
1304 if (event_child->must_set_ptrace_flags)
1305 {
1e7fc18c 1306 linux_enable_event_reporting (lwpid_of (event_child));
a6dbe5df
PA
1307 event_child->must_set_ptrace_flags = 0;
1308 }
1309
bd99dc85
PA
1310 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1311 && *wstat >> 16 != 0)
24a09b5f 1312 {
bd99dc85 1313 handle_extended_wait (event_child, *wstat);
24a09b5f
DJ
1314 continue;
1315 }
1316
d50171e4
PA
1317 if (WIFSTOPPED (*wstat)
1318 && WSTOPSIG (*wstat) == SIGSTOP
1319 && event_child->stop_expected)
1320 {
1321 int should_stop;
1322
1323 if (debug_threads)
1324 fprintf (stderr, "Expected stop.\n");
1325 event_child->stop_expected = 0;
1326
8336d594 1327 should_stop = (current_inferior->last_resume_kind == resume_stop
d50171e4
PA
1328 || stopping_threads);
1329
1330 if (!should_stop)
1331 {
1332 linux_resume_one_lwp (event_child,
1333 event_child->stepping, 0, NULL);
1334 continue;
1335 }
1336 }
1337
bd99dc85 1338 return lwpid_of (event_child);
611cb4a5 1339 }
0d62e5e8 1340
611cb4a5
DJ
1341 /* NOTREACHED */
1342 return 0;
1343}
1344
95954743
PA
1345static int
1346linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1347{
1348 ptid_t wait_ptid;
1349
1350 if (ptid_is_pid (ptid))
1351 {
1352 /* A request to wait for a specific tgid. This is not possible
1353 with waitpid, so instead, we wait for any child, and leave
1354 children we're not interested in right now with a pending
1355 status to report later. */
1356 wait_ptid = minus_one_ptid;
1357 }
1358 else
1359 wait_ptid = ptid;
1360
1361 while (1)
1362 {
1363 int event_pid;
1364
1365 event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);
1366
1367 if (event_pid > 0
1368 && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
1369 {
1370 struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));
1371
1372 if (! WIFSTOPPED (*wstat))
1373 mark_lwp_dead (event_child, *wstat);
1374 else
1375 {
1376 event_child->status_pending_p = 1;
1377 event_child->status_pending = *wstat;
1378 }
1379 }
1380 else
1381 return event_pid;
1382 }
1383}
1384
6bf5e0ba
PA
1385
1386/* Count the LWP's that have had events. */
1387
1388static int
1389count_events_callback (struct inferior_list_entry *entry, void *data)
1390{
1391 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1392 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1393 int *count = data;
1394
1395 gdb_assert (count != NULL);
1396
1397 /* Count only resumed LWPs that have a SIGTRAP event pending that
1398 should be reported to GDB. */
8336d594
PA
1399 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1400 && thread->last_resume_kind != resume_stop
6bf5e0ba
PA
1401 && lp->status_pending_p
1402 && WIFSTOPPED (lp->status_pending)
1403 && WSTOPSIG (lp->status_pending) == SIGTRAP
1404 && !breakpoint_inserted_here (lp->stop_pc))
1405 (*count)++;
1406
1407 return 0;
1408}
1409
1410/* Select the LWP (if any) that is currently being single-stepped. */
1411
1412static int
1413select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1414{
1415 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1416 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba 1417
8336d594
PA
1418 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1419 && thread->last_resume_kind == resume_step
6bf5e0ba
PA
1420 && lp->status_pending_p)
1421 return 1;
1422 else
1423 return 0;
1424}
1425
1426/* Select the Nth LWP that has had a SIGTRAP event that should be
1427 reported to GDB. */
1428
1429static int
1430select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1431{
1432 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1433 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1434 int *selector = data;
1435
1436 gdb_assert (selector != NULL);
1437
1438 /* Select only resumed LWPs that have a SIGTRAP event pending. */
8336d594
PA
1439 if (thread->last_resume_kind != resume_stop
1440 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
6bf5e0ba
PA
1441 && lp->status_pending_p
1442 && WIFSTOPPED (lp->status_pending)
1443 && WSTOPSIG (lp->status_pending) == SIGTRAP
1444 && !breakpoint_inserted_here (lp->stop_pc))
1445 if ((*selector)-- == 0)
1446 return 1;
1447
1448 return 0;
1449}
1450
1451static int
1452cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
1453{
1454 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1455 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1456 struct lwp_info *event_lp = data;
1457
1458 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1459 if (lp == event_lp)
1460 return 0;
1461
1462 /* If a LWP other than the LWP that we're reporting an event for has
1463 hit a GDB breakpoint (as opposed to some random trap signal),
1464 then just arrange for it to hit it again later. We don't keep
1465 the SIGTRAP status and don't forward the SIGTRAP signal to the
1466 LWP. We will handle the current event, eventually we will resume
1467 all LWPs, and this one will get its breakpoint trap again.
1468
1469 If we do not do this, then we run the risk that the user will
1470 delete or disable the breakpoint, but the LWP will have already
1471 tripped on it. */
1472
8336d594
PA
1473 if (thread->last_resume_kind != resume_stop
1474 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
6bf5e0ba
PA
1475 && lp->status_pending_p
1476 && WIFSTOPPED (lp->status_pending)
1477 && WSTOPSIG (lp->status_pending) == SIGTRAP
bdabb078
PA
1478 && !lp->stepping
1479 && !lp->stopped_by_watchpoint
6bf5e0ba
PA
1480 && cancel_breakpoint (lp))
1481 /* Throw away the SIGTRAP. */
1482 lp->status_pending_p = 0;
1483
1484 return 0;
1485}
1486
1487/* Select one LWP out of those that have events pending. */
1488
1489static void
1490select_event_lwp (struct lwp_info **orig_lp)
1491{
1492 int num_events = 0;
1493 int random_selector;
1494 struct lwp_info *event_lp;
1495
1496 /* Give preference to any LWP that is being single-stepped. */
1497 event_lp
1498 = (struct lwp_info *) find_inferior (&all_lwps,
1499 select_singlestep_lwp_callback, NULL);
1500 if (event_lp != NULL)
1501 {
1502 if (debug_threads)
1503 fprintf (stderr,
1504 "SEL: Select single-step %s\n",
1505 target_pid_to_str (ptid_of (event_lp)));
1506 }
1507 else
1508 {
1509 /* No single-stepping LWP. Select one at random, out of those
1510 which have had SIGTRAP events. */
1511
1512 /* First see how many SIGTRAP events we have. */
1513 find_inferior (&all_lwps, count_events_callback, &num_events);
1514
1515 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1516 random_selector = (int)
1517 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1518
1519 if (debug_threads && num_events > 1)
1520 fprintf (stderr,
1521 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1522 num_events, random_selector);
1523
1524 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
1525 select_event_lwp_callback,
1526 &random_selector);
1527 }
1528
1529 if (event_lp != NULL)
1530 {
1531 /* Switch the event LWP. */
1532 *orig_lp = event_lp;
1533 }
1534}
1535
d50171e4
PA
1536/* Set this inferior LWP's state as "want-stopped". We won't resume
1537 this LWP until the client gives us another action for it. */
1538
1539static void
1540gdb_wants_lwp_stopped (struct inferior_list_entry *entry)
1541{
1542 struct lwp_info *lwp = (struct lwp_info *) entry;
1543 struct thread_info *thread = get_lwp_thread (lwp);
1544
1545 /* Most threads are stopped implicitly (all-stop); tag that with
1546 signal 0. The thread being explicitly reported stopped to the
1547 client, gets it's status fixed up afterwards. */
1548 thread->last_status.kind = TARGET_WAITKIND_STOPPED;
1549 thread->last_status.value.sig = TARGET_SIGNAL_0;
1550
8336d594 1551 thread->last_resume_kind = resume_stop;
d50171e4
PA
1552}
1553
/* Set all LWP's states as "want-stopped", by applying
   gdb_wants_lwp_stopped to every LWP in ALL_LWPS.  */

static void
gdb_wants_all_stopped (void)
{
  for_each_inferior (&all_lwps, gdb_wants_lwp_stopped);
}
1561
/* Wait for process, returns status.

   The core of the Linux target's wait: loop calling
   linux_wait_for_event, filtering out events gdbserver handles
   itself (internal breakpoints, finished step-overs, tracepoint
   collections, ignored signals), until an event GDB should see is
   found.  Fills in *OURSTATUS and returns the ptid of the reported
   LWP, or null_ptid when TARGET_WNOHANG was given and nothing was
   pending.  */

static ptid_t
linux_wait_1 (ptid_t ptid,
	      struct target_waitstatus *ourstatus, int target_options)
{
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

retry:
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive.  If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads.  We could
     pick a thread at random or restart all; restarting all is less
     arbitrary.  */
  if (!non_stop
      && !ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      struct thread_info *thread;

      thread = (struct thread_info *) find_inferior_id (&all_threads,
							cont_thread);

      /* No stepping, no signal - unless one is pending already, of course.  */
      if (thread == NULL)
	{
	  struct thread_resume resume_info;
	  resume_info.thread = minus_one_ptid;
	  resume_info.kind = resume_continue;
	  resume_info.sig = 0;
	  linux_resume (&resume_info, 1);
	}
      else
	ptid = cont_thread;
    }

  /* While a step-over is in progress, only the stepping thread's
     events matter; wait for it, blocking.  */
  if (ptid_equal (step_over_bkpt, null_ptid))
    pid = linux_wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
	fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
		 target_pid_to_str (step_over_bkpt));
      pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0) /* only if TARGET_WNOHANG */
    return null_ptid;

  event_child = get_thread_lwp (current_inferior);

  /* If we are waiting for a particular child, and it exited,
     linux_wait_for_event will return its exit status.  Similarly if
     the last child exited.  If this is not the last child, however,
     do not report it as exited until there is a 'thread exited' response
     available in the remote protocol.  Instead, just wait for another event.
     This should be safe, because if the thread crashed we will already
     have reported the termination signal to GDB; that should stop any
     in-progress stepping operations, etc.

     Report the exit status of the last thread to exit.  This matches
     LinuxThreads' behavior.  */

  if (last_thread_of_process_p (current_inferior))
    {
      if (WIFEXITED (w) || WIFSIGNALED (w))
	{
	  if (WIFEXITED (w))
	    {
	      ourstatus->kind = TARGET_WAITKIND_EXITED;
	      ourstatus->value.integer = WEXITSTATUS (w);

	      if (debug_threads)
		fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
	    }
	  else
	    {
	      ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	      ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));

	      if (debug_threads)
		fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));

	    }

	  return pid_to_ptid (pid);
	}
    }
  else
    {
      if (!WIFSTOPPED (w))
	goto retry;
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && (*the_low_target.breakpoint_at) (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any reinsert (software
	 single-step) breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	{
	  /* If we stepped or ran into an internal breakpoint, we've
	     already handled it.  So next time we resume (from this
	     PC), we should step over it.  */
	  if (debug_threads)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");

	  if (breakpoint_here (event_child->stop_pc))
	    event_child->need_step_over = 1;
	}
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);

      trace_event = 0;
    }

  /* Check whether GDB would be interested in this event.  */

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler.  */
  /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
     thread library?  */
  if (WIFSTOPPED (w)
      && current_inferior->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
	  (current_process ()->private->thread_db != NULL
	   && (WSTOPSIG (w) == __SIGRTMIN
	       || WSTOPSIG (w) == __SIGRTMIN + 1))
	  ||
#endif
	  (pass_signals[target_signal_from_host (WSTOPSIG (w))]
	   && !(WSTOPSIG (w) == SIGSTOP
		&& current_inferior->last_resume_kind == resume_stop))))
    {
      siginfo_t info, *info_p;

      if (debug_threads)
	fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
		 WSTOPSIG (w), lwpid_of (event_child));

      /* Forward the signal (with its original siginfo, when we can
	 fetch it) and keep waiting.  */
      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;
      linux_resume_one_lwp (event_child, event_child->stepping,
			    WSTOPSIG (w), info_p);
      goto retry;
    }

  /* If GDB wanted this thread to single step, we always want to
     report the SIGTRAP, and let GDB handle it.  Watchpoints should
     always be reported.  So should signals we can't explain.  A
     SIGTRAP we can't explain could be a GDB breakpoint --- we may or
     not support Z0 breakpoints.  If we do, we're be able to handle
     GDB breakpoints on top of internal breakpoints, by handling the
     internal breakpoint and still reporting the event to GDB.  If we
     don't, we're out of luck, GDB won't see the breakpoint hit.  */
  report_to_gdb = (!maybe_internal_trap
		   || current_inferior->last_resume_kind == resume_step
		   || event_child->stopped_by_watchpoint
		   || (!step_over_finished && !bp_explains_trap && !trace_event)
		   || gdb_breakpoint_here (event_child->stop_pc));

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
	{
	  if (bp_explains_trap)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");
	  if (step_over_finished)
	    fprintf (stderr, "Step-over finished.\n");
	  if (trace_event)
	    fprintf (stderr, "Tracepoint event.\n");
	}

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (the_low_target.set_pc != NULL)
	{
	  struct regcache *regcache
	    = get_thread_regcache (get_lwp_thread (event_child), 1);
	  (*the_low_target.set_pc) (regcache, event_child->stop_pc);
	}

      /* We've finished stepping over a breakpoint.  We've stopped all
	 LWPs momentarily except the stepping one.  This is where we
	 resume them all again.  We're going to keep waiting, so use
	 proceed, which handles stepping over the next breakpoint.  */
      if (debug_threads)
	fprintf (stderr, "proceeding all threads.\n");
      proceed_all_lwps ();
      goto retry;
    }

  if (debug_threads)
    {
      if (current_inferior->last_resume_kind == resume_step)
	fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
      if (event_child->stopped_by_watchpoint)
	fprintf (stderr, "Stopped by watchpoint.\n");
      if (gdb_breakpoint_here (event_child->stop_pc))
	fprintf (stderr, "Stopped by GDB breakpoint.\n");
      if (debug_threads)
	fprintf (stderr, "Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  if (!non_stop)
    {
      /* In all-stop, stop all threads.  */
      stop_all_lwps ();

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid_equal (ptid, minus_one_ptid))
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}

      /* Now that we've selected our final event LWP, cancel any
	 breakpoints in other LWPs that have hit a GDB breakpoint.
	 See the comment in cancel_breakpoints_callback to find out
	 why.  */
      find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (event_child);
    }

  ourstatus->kind = TARGET_WAITKIND_STOPPED;

  /* Do this before the gdb_wants_all_stopped calls below, since they
     always set last_resume_kind to resume_stop.  */
  if (current_inferior->last_resume_kind == resume_stop
      && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = TARGET_SIGNAL_0;
    }
  else if (current_inferior->last_resume_kind == resume_stop
	   && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons.  */
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }
  else
    {
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }

  gdb_assert (ptid_equal (step_over_bkpt, null_ptid));

  if (!non_stop)
    {
      /* From GDB's perspective, all-stop mode always stops all
	 threads implicitly.  Tag all threads as "want-stopped".  */
      gdb_wants_all_stopped ();
    }
  else
    {
      /* We're reporting this LWP as stopped.  Update it's
	 "want-stopped" state to what the client wants, until it gets
	 a new resume action.  */
      gdb_wants_lwp_stopped (&event_child->head);
    }

  if (debug_threads)
    fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
	     target_pid_to_str (ptid_of (event_child)),
	     ourstatus->kind,
	     ourstatus->value.sig);

  get_lwp_thread (event_child)->last_status = *ourstatus;
  return ptid_of (event_child);
}
1918
1919/* Get rid of any pending event in the pipe. */
1920static void
1921async_file_flush (void)
1922{
1923 int ret;
1924 char buf;
1925
1926 do
1927 ret = read (linux_event_pipe[0], &buf, 1);
1928 while (ret >= 0 || (ret == -1 && errno == EINTR));
1929}
1930
1931/* Put something in the pipe, so the event loop wakes up. */
1932static void
1933async_file_mark (void)
1934{
1935 int ret;
1936
1937 async_file_flush ();
1938
1939 do
1940 ret = write (linux_event_pipe[1], "+", 1);
1941 while (ret == 0 || (ret == -1 && errno == EINTR));
1942
1943 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1944 be awakened anyway. */
1945}
1946
95954743
PA
1947static ptid_t
1948linux_wait (ptid_t ptid,
1949 struct target_waitstatus *ourstatus, int target_options)
bd99dc85 1950{
95954743 1951 ptid_t event_ptid;
bd99dc85
PA
1952
1953 if (debug_threads)
95954743 1954 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
bd99dc85
PA
1955
1956 /* Flush the async file first. */
1957 if (target_is_async_p ())
1958 async_file_flush ();
1959
95954743 1960 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
bd99dc85
PA
1961
1962 /* If at least one stop was reported, there may be more. A single
1963 SIGCHLD can signal more than one child stop. */
1964 if (target_is_async_p ()
1965 && (target_options & TARGET_WNOHANG) != 0
95954743 1966 && !ptid_equal (event_ptid, null_ptid))
bd99dc85
PA
1967 async_file_mark ();
1968
1969 return event_ptid;
da6d8c04
DJ
1970}
1971
c5f62d5f 1972/* Send a signal to an LWP. */
fd500816
DJ
1973
1974static int
a1928bad 1975kill_lwp (unsigned long lwpid, int signo)
fd500816 1976{
c5f62d5f
DE
1977 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1978 fails, then we are not using nptl threads and we should be using kill. */
fd500816 1979
c5f62d5f
DE
1980#ifdef __NR_tkill
1981 {
1982 static int tkill_failed;
fd500816 1983
c5f62d5f
DE
1984 if (!tkill_failed)
1985 {
1986 int ret;
1987
1988 errno = 0;
1989 ret = syscall (__NR_tkill, lwpid, signo);
1990 if (errno != ENOSYS)
1991 return ret;
1992 tkill_failed = 1;
1993 }
1994 }
fd500816
DJ
1995#endif
1996
1997 return kill (lwpid, signo);
1998}
1999
0d62e5e8 2000static void
02fc4de7 2001send_sigstop (struct lwp_info *lwp)
0d62e5e8 2002{
bd99dc85 2003 int pid;
0d62e5e8 2004
bd99dc85
PA
2005 pid = lwpid_of (lwp);
2006
0d62e5e8
DJ
2007 /* If we already have a pending stop signal for this process, don't
2008 send another. */
54a0b537 2009 if (lwp->stop_expected)
0d62e5e8 2010 {
ae13219e 2011 if (debug_threads)
bd99dc85 2012 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
ae13219e 2013
0d62e5e8
DJ
2014 return;
2015 }
2016
2017 if (debug_threads)
bd99dc85 2018 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
0d62e5e8 2019
d50171e4 2020 lwp->stop_expected = 1;
bd99dc85 2021 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
2022}
2023
02fc4de7
PA
2024static void
2025send_sigstop_callback (struct inferior_list_entry *entry)
2026{
2027 struct lwp_info *lwp = (struct lwp_info *) entry;
2028
2029 if (lwp->stopped)
2030 return;
2031
2032 send_sigstop (lwp);
2033}
2034
95954743
PA
2035static void
2036mark_lwp_dead (struct lwp_info *lwp, int wstat)
2037{
2038 /* It's dead, really. */
2039 lwp->dead = 1;
2040
2041 /* Store the exit status for later. */
2042 lwp->status_pending_p = 1;
2043 lwp->status_pending = wstat;
2044
95954743
PA
2045 /* Prevent trying to stop it. */
2046 lwp->stopped = 1;
2047
2048 /* No further stops are expected from a dead lwp. */
2049 lwp->stop_expected = 0;
2050}
2051
0d62e5e8
DJ
2052static void
2053wait_for_sigstop (struct inferior_list_entry *entry)
2054{
54a0b537 2055 struct lwp_info *lwp = (struct lwp_info *) entry;
bd99dc85 2056 struct thread_info *saved_inferior;
a1928bad 2057 int wstat;
95954743
PA
2058 ptid_t saved_tid;
2059 ptid_t ptid;
d50171e4 2060 int pid;
0d62e5e8 2061
54a0b537 2062 if (lwp->stopped)
d50171e4
PA
2063 {
2064 if (debug_threads)
2065 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2066 lwpid_of (lwp));
2067 return;
2068 }
0d62e5e8
DJ
2069
2070 saved_inferior = current_inferior;
bd99dc85
PA
2071 if (saved_inferior != NULL)
2072 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2073 else
95954743 2074 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 2075
95954743 2076 ptid = lwp->head.id;
bd99dc85 2077
d50171e4
PA
2078 if (debug_threads)
2079 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2080
2081 pid = linux_wait_for_event (ptid, &wstat, __WALL);
0d62e5e8
DJ
2082
2083 /* If we stopped with a non-SIGSTOP signal, save it for later
2084 and record the pending SIGSTOP. If the process exited, just
2085 return. */
d50171e4 2086 if (WIFSTOPPED (wstat))
0d62e5e8
DJ
2087 {
2088 if (debug_threads)
d50171e4
PA
2089 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2090 lwpid_of (lwp), WSTOPSIG (wstat));
c35fafde 2091
d50171e4 2092 if (WSTOPSIG (wstat) != SIGSTOP)
c35fafde
PA
2093 {
2094 if (debug_threads)
d50171e4
PA
2095 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2096 lwpid_of (lwp), wstat);
2097
c35fafde
PA
2098 lwp->status_pending_p = 1;
2099 lwp->status_pending = wstat;
2100 }
0d62e5e8 2101 }
d50171e4 2102 else
95954743
PA
2103 {
2104 if (debug_threads)
d50171e4 2105 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
95954743 2106
d50171e4
PA
2107 lwp = find_lwp_pid (pid_to_ptid (pid));
2108 if (lwp)
2109 {
2110 /* Leave this status pending for the next time we're able to
2111 report it. In the mean time, we'll report this lwp as
2112 dead to GDB, so GDB doesn't try to read registers and
2113 memory from it. This can only happen if this was the
2114 last thread of the process; otherwise, PID is removed
2115 from the thread tables before linux_wait_for_event
2116 returns. */
2117 mark_lwp_dead (lwp, wstat);
2118 }
95954743 2119 }
0d62e5e8 2120
bd99dc85 2121 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
0d62e5e8
DJ
2122 current_inferior = saved_inferior;
2123 else
2124 {
2125 if (debug_threads)
2126 fprintf (stderr, "Previously current thread died.\n");
2127
bd99dc85
PA
2128 if (non_stop)
2129 {
2130 /* We can't change the current inferior behind GDB's back,
2131 otherwise, a subsequent command may apply to the wrong
2132 process. */
2133 current_inferior = NULL;
2134 }
2135 else
2136 {
2137 /* Set a valid thread as current. */
2138 set_desired_inferior (0);
2139 }
0d62e5e8
DJ
2140 }
2141}
2142
2143static void
54a0b537 2144stop_all_lwps (void)
0d62e5e8
DJ
2145{
2146 stopping_threads = 1;
02fc4de7 2147 for_each_inferior (&all_lwps, send_sigstop_callback);
54a0b537 2148 for_each_inferior (&all_lwps, wait_for_sigstop);
0d62e5e8
DJ
2149 stopping_threads = 0;
2150}
2151
da6d8c04
DJ
2152/* Resume execution of the inferior process.
2153 If STEP is nonzero, single-step it.
2154 If SIGNAL is nonzero, give it that signal. */
2155
ce3a066d 2156static void
2acc282a 2157linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 2158 int step, int signal, siginfo_t *info)
da6d8c04 2159{
0d62e5e8
DJ
2160 struct thread_info *saved_inferior;
2161
54a0b537 2162 if (lwp->stopped == 0)
0d62e5e8
DJ
2163 return;
2164
219f2f23
PA
2165 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2166 user used the "jump" command, or "set $pc = foo"). */
2167 if (lwp->stop_pc != get_pc (lwp))
2168 {
2169 /* Collecting 'while-stepping' actions doesn't make sense
2170 anymore. */
2171 release_while_stepping_state_list (get_lwp_thread (lwp));
2172 }
2173
0d62e5e8
DJ
2174 /* If we have pending signals or status, and a new signal, enqueue the
2175 signal. Also enqueue the signal if we are waiting to reinsert a
2176 breakpoint; it will be picked up again below. */
2177 if (signal != 0
54a0b537
PA
2178 && (lwp->status_pending_p || lwp->pending_signals != NULL
2179 || lwp->bp_reinsert != 0))
0d62e5e8
DJ
2180 {
2181 struct pending_signals *p_sig;
bca929d3 2182 p_sig = xmalloc (sizeof (*p_sig));
54a0b537 2183 p_sig->prev = lwp->pending_signals;
0d62e5e8 2184 p_sig->signal = signal;
32ca6d61
DJ
2185 if (info == NULL)
2186 memset (&p_sig->info, 0, sizeof (siginfo_t));
2187 else
2188 memcpy (&p_sig->info, info, sizeof (siginfo_t));
54a0b537 2189 lwp->pending_signals = p_sig;
0d62e5e8
DJ
2190 }
2191
d50171e4
PA
2192 if (lwp->status_pending_p)
2193 {
2194 if (debug_threads)
2195 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2196 " has pending status\n",
2197 lwpid_of (lwp), step ? "step" : "continue", signal,
2198 lwp->stop_expected ? "expected" : "not expected");
2199 return;
2200 }
0d62e5e8
DJ
2201
2202 saved_inferior = current_inferior;
54a0b537 2203 current_inferior = get_lwp_thread (lwp);
0d62e5e8
DJ
2204
2205 if (debug_threads)
1b3f6016 2206 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
bd99dc85 2207 lwpid_of (lwp), step ? "step" : "continue", signal,
54a0b537 2208 lwp->stop_expected ? "expected" : "not expected");
0d62e5e8
DJ
2209
2210 /* This bit needs some thinking about. If we get a signal that
2211 we must report while a single-step reinsert is still pending,
2212 we often end up resuming the thread. It might be better to
2213 (ew) allow a stack of pending events; then we could be sure that
2214 the reinsert happened right away and not lose any signals.
2215
2216 Making this stack would also shrink the window in which breakpoints are
54a0b537 2217 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
2218 complete correctness, so it won't solve that problem. It may be
2219 worthwhile just to solve this one, however. */
54a0b537 2220 if (lwp->bp_reinsert != 0)
0d62e5e8
DJ
2221 {
2222 if (debug_threads)
d50171e4
PA
2223 fprintf (stderr, " pending reinsert at 0x%s\n",
2224 paddress (lwp->bp_reinsert));
2225
2226 if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
2227 {
2228 if (step == 0)
2229 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2230
2231 step = 1;
2232 }
0d62e5e8
DJ
2233
2234 /* Postpone any pending signal. It was enqueued above. */
2235 signal = 0;
2236 }
2237
219f2f23
PA
2238 /* If we have while-stepping actions in this thread set it stepping.
2239 If we have a signal to deliver, it may or may not be set to
2240 SIG_IGN, we don't know. Assume so, and allow collecting
2241 while-stepping into a signal handler. A possible smart thing to
2242 do would be to set an internal breakpoint at the signal return
2243 address, continue, and carry on catching this while-stepping
2244 action only when that breakpoint is hit. A future
2245 enhancement. */
2246 if (get_lwp_thread (lwp)->while_stepping != NULL
2247 && can_hardware_single_step ())
2248 {
2249 if (debug_threads)
2250 fprintf (stderr,
2251 "lwp %ld has a while-stepping action -> forcing step.\n",
2252 lwpid_of (lwp));
2253 step = 1;
2254 }
2255
aa691b87 2256 if (debug_threads && the_low_target.get_pc != NULL)
0d62e5e8 2257 {
442ea881
PA
2258 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
2259 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
47c0c975 2260 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
0d62e5e8
DJ
2261 }
2262
2263 /* If we have pending signals, consume one unless we are trying to reinsert
2264 a breakpoint. */
54a0b537 2265 if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
0d62e5e8
DJ
2266 {
2267 struct pending_signals **p_sig;
2268
54a0b537 2269 p_sig = &lwp->pending_signals;
0d62e5e8
DJ
2270 while ((*p_sig)->prev != NULL)
2271 p_sig = &(*p_sig)->prev;
2272
2273 signal = (*p_sig)->signal;
32ca6d61 2274 if ((*p_sig)->info.si_signo != 0)
bd99dc85 2275 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
32ca6d61 2276
0d62e5e8
DJ
2277 free (*p_sig);
2278 *p_sig = NULL;
2279 }
2280
aa5ca48f
DE
2281 if (the_low_target.prepare_to_resume != NULL)
2282 the_low_target.prepare_to_resume (lwp);
2283
0d62e5e8 2284 regcache_invalidate_one ((struct inferior_list_entry *)
54a0b537 2285 get_lwp_thread (lwp));
da6d8c04 2286 errno = 0;
54a0b537 2287 lwp->stopped = 0;
c3adc08c 2288 lwp->stopped_by_watchpoint = 0;
54a0b537 2289 lwp->stepping = step;
14ce3065
DE
2290 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
2291 /* Coerce to a uintptr_t first to avoid potential gcc warning
2292 of coercing an 8 byte integer to a 4 byte pointer. */
2293 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
0d62e5e8
DJ
2294
2295 current_inferior = saved_inferior;
da6d8c04 2296 if (errno)
3221518c
UW
2297 {
2298 /* ESRCH from ptrace either means that the thread was already
2299 running (an error) or that it is gone (a race condition). If
2300 it's gone, we will get a notification the next time we wait,
2301 so we can ignore the error. We could differentiate these
2302 two, but it's tricky without waiting; the thread still exists
2303 as a zombie, so sending it signal 0 would succeed. So just
2304 ignore ESRCH. */
2305 if (errno == ESRCH)
2306 return;
2307
2308 perror_with_name ("ptrace");
2309 }
da6d8c04
DJ
2310}
2311
2bd7c093
PA
2312struct thread_resume_array
2313{
2314 struct thread_resume *resume;
2315 size_t n;
2316};
64386c31
DJ
2317
2318/* This function is called once per thread. We look up the thread
5544ad89
DJ
2319 in RESUME_PTR, and mark the thread with a pointer to the appropriate
2320 resume request.
2321
2322 This algorithm is O(threads * resume elements), but resume elements
2323 is small (and will remain small at least until GDB supports thread
2324 suspension). */
2bd7c093
PA
2325static int
2326linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
0d62e5e8 2327{
54a0b537 2328 struct lwp_info *lwp;
64386c31 2329 struct thread_info *thread;
5544ad89 2330 int ndx;
2bd7c093 2331 struct thread_resume_array *r;
64386c31
DJ
2332
2333 thread = (struct thread_info *) entry;
54a0b537 2334 lwp = get_thread_lwp (thread);
2bd7c093 2335 r = arg;
64386c31 2336
2bd7c093 2337 for (ndx = 0; ndx < r->n; ndx++)
95954743
PA
2338 {
2339 ptid_t ptid = r->resume[ndx].thread;
2340 if (ptid_equal (ptid, minus_one_ptid)
2341 || ptid_equal (ptid, entry->id)
2342 || (ptid_is_pid (ptid)
2343 && (ptid_get_pid (ptid) == pid_of (lwp)))
2344 || (ptid_get_lwp (ptid) == -1
2345 && (ptid_get_pid (ptid) == pid_of (lwp))))
2346 {
d50171e4 2347 if (r->resume[ndx].kind == resume_stop
8336d594 2348 && thread->last_resume_kind == resume_stop)
d50171e4
PA
2349 {
2350 if (debug_threads)
2351 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
2352 thread->last_status.kind == TARGET_WAITKIND_STOPPED
2353 ? "stopped"
2354 : "stopping",
2355 lwpid_of (lwp));
2356
2357 continue;
2358 }
2359
95954743 2360 lwp->resume = &r->resume[ndx];
8336d594 2361 thread->last_resume_kind = lwp->resume->kind;
95954743
PA
2362 return 0;
2363 }
2364 }
2bd7c093
PA
2365
2366 /* No resume action for this thread. */
2367 lwp->resume = NULL;
64386c31 2368
2bd7c093 2369 return 0;
5544ad89
DJ
2370}
2371
5544ad89 2372
bd99dc85
PA
2373/* Set *FLAG_P if this lwp has an interesting status pending. */
2374static int
2375resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 2376{
bd99dc85 2377 struct lwp_info *lwp = (struct lwp_info *) entry;
5544ad89 2378
bd99dc85
PA
2379 /* LWPs which will not be resumed are not interesting, because
2380 we might not wait for them next time through linux_wait. */
2bd7c093 2381 if (lwp->resume == NULL)
bd99dc85 2382 return 0;
64386c31 2383
bd99dc85 2384 if (lwp->status_pending_p)
d50171e4
PA
2385 * (int *) flag_p = 1;
2386
2387 return 0;
2388}
2389
2390/* Return 1 if this lwp that GDB wants running is stopped at an
2391 internal breakpoint that we need to step over. It assumes that any
2392 required STOP_PC adjustment has already been propagated to the
2393 inferior's regcache. */
2394
2395static int
2396need_step_over_p (struct inferior_list_entry *entry, void *dummy)
2397{
2398 struct lwp_info *lwp = (struct lwp_info *) entry;
8336d594 2399 struct thread_info *thread;
d50171e4
PA
2400 struct thread_info *saved_inferior;
2401 CORE_ADDR pc;
2402
2403 /* LWPs which will not be resumed are not interesting, because we
2404 might not wait for them next time through linux_wait. */
2405
2406 if (!lwp->stopped)
2407 {
2408 if (debug_threads)
2409 fprintf (stderr,
2410 "Need step over [LWP %ld]? Ignoring, not stopped\n",
2411 lwpid_of (lwp));
2412 return 0;
2413 }
2414
8336d594
PA
2415 thread = get_lwp_thread (lwp);
2416
2417 if (thread->last_resume_kind == resume_stop)
d50171e4
PA
2418 {
2419 if (debug_threads)
2420 fprintf (stderr,
2421 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
2422 lwpid_of (lwp));
2423 return 0;
2424 }
2425
2426 if (!lwp->need_step_over)
2427 {
2428 if (debug_threads)
2429 fprintf (stderr,
2430 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
2431 }
5544ad89 2432
bd99dc85 2433 if (lwp->status_pending_p)
d50171e4
PA
2434 {
2435 if (debug_threads)
2436 fprintf (stderr,
2437 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
2438 lwpid_of (lwp));
2439 return 0;
2440 }
2441
2442 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
2443 or we have. */
2444 pc = get_pc (lwp);
2445
2446 /* If the PC has changed since we stopped, then don't do anything,
2447 and let the breakpoint/tracepoint be hit. This happens if, for
2448 instance, GDB handled the decr_pc_after_break subtraction itself,
2449 GDB is OOL stepping this thread, or the user has issued a "jump"
2450 command, or poked thread's registers herself. */
2451 if (pc != lwp->stop_pc)
2452 {
2453 if (debug_threads)
2454 fprintf (stderr,
2455 "Need step over [LWP %ld]? Cancelling, PC was changed. "
2456 "Old stop_pc was 0x%s, PC is now 0x%s\n",
2457 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
2458
2459 lwp->need_step_over = 0;
2460 return 0;
2461 }
2462
2463 saved_inferior = current_inferior;
8336d594 2464 current_inferior = thread;
d50171e4 2465
8b07ae33 2466 /* We can only step over breakpoints we know about. */
d50171e4
PA
2467 if (breakpoint_here (pc))
2468 {
8b07ae33
PA
2469 /* Don't step over a breakpoint that GDB expects to hit
2470 though. */
2471 if (gdb_breakpoint_here (pc))
2472 {
2473 if (debug_threads)
2474 fprintf (stderr,
2475 "Need step over [LWP %ld]? yes, but found"
2476 " GDB breakpoint at 0x%s; skipping step over\n",
2477 lwpid_of (lwp), paddress (pc));
d50171e4 2478
8b07ae33
PA
2479 current_inferior = saved_inferior;
2480 return 0;
2481 }
2482 else
2483 {
2484 if (debug_threads)
2485 fprintf (stderr,
2486 "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n",
2487 lwpid_of (lwp), paddress (pc));
d50171e4 2488
8b07ae33
PA
2489 /* We've found an lwp that needs stepping over --- return 1 so
2490 that find_inferior stops looking. */
2491 current_inferior = saved_inferior;
2492
2493 /* If the step over is cancelled, this is set again. */
2494 lwp->need_step_over = 0;
2495 return 1;
2496 }
d50171e4
PA
2497 }
2498
2499 current_inferior = saved_inferior;
2500
2501 if (debug_threads)
2502 fprintf (stderr,
2503 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
2504 lwpid_of (lwp), paddress (pc));
c6ecbae5 2505
bd99dc85 2506 return 0;
5544ad89
DJ
2507}
2508
d50171e4
PA
2509/* Start a step-over operation on LWP. When LWP stopped at a
2510 breakpoint, to make progress, we need to remove the breakpoint out
2511 of the way. If we let other threads run while we do that, they may
2512 pass by the breakpoint location and miss hitting it. To avoid
2513 that, a step-over momentarily stops all threads while LWP is
2514 single-stepped while the breakpoint is temporarily uninserted from
2515 the inferior. When the single-step finishes, we reinsert the
2516 breakpoint, and let all threads that are supposed to be running,
2517 run again.
2518
2519 On targets that don't support hardware single-step, we don't
2520 currently support full software single-stepping. Instead, we only
2521 support stepping over the thread event breakpoint, by asking the
2522 low target where to place a reinsert breakpoint. Since this
2523 routine assumes the breakpoint being stepped over is a thread event
2524 breakpoint, it usually assumes the return address of the current
2525 function is a good enough place to set the reinsert breakpoint. */
2526
2527static int
2528start_step_over (struct lwp_info *lwp)
2529{
2530 struct thread_info *saved_inferior;
2531 CORE_ADDR pc;
2532 int step;
2533
2534 if (debug_threads)
2535 fprintf (stderr,
2536 "Starting step-over on LWP %ld. Stopping all threads\n",
2537 lwpid_of (lwp));
2538
2539 stop_all_lwps ();
2540
2541 if (debug_threads)
2542 fprintf (stderr, "Done stopping all threads for step-over.\n");
2543
2544 /* Note, we should always reach here with an already adjusted PC,
2545 either by GDB (if we're resuming due to GDB's request), or by our
2546 caller, if we just finished handling an internal breakpoint GDB
2547 shouldn't care about. */
2548 pc = get_pc (lwp);
2549
2550 saved_inferior = current_inferior;
2551 current_inferior = get_lwp_thread (lwp);
2552
2553 lwp->bp_reinsert = pc;
2554 uninsert_breakpoints_at (pc);
2555
2556 if (can_hardware_single_step ())
2557 {
2558 step = 1;
2559 }
2560 else
2561 {
2562 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
2563 set_reinsert_breakpoint (raddr);
2564 step = 0;
2565 }
2566
2567 current_inferior = saved_inferior;
2568
2569 linux_resume_one_lwp (lwp, step, 0, NULL);
2570
2571 /* Require next event from this LWP. */
2572 step_over_bkpt = lwp->head.id;
2573 return 1;
2574}
2575
2576/* Finish a step-over. Reinsert the breakpoint we had uninserted in
2577 start_step_over, if still there, and delete any reinsert
2578 breakpoints we've set, on non hardware single-step targets. */
2579
2580static int
2581finish_step_over (struct lwp_info *lwp)
2582{
2583 if (lwp->bp_reinsert != 0)
2584 {
2585 if (debug_threads)
2586 fprintf (stderr, "Finished step over.\n");
2587
2588 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
2589 may be no breakpoint to reinsert there by now. */
2590 reinsert_breakpoints_at (lwp->bp_reinsert);
2591
2592 lwp->bp_reinsert = 0;
2593
2594 /* Delete any software-single-step reinsert breakpoints. No
2595 longer needed. We don't have to worry about other threads
2596 hitting this trap, and later not being able to explain it,
2597 because we were stepping over a breakpoint, and we hold all
2598 threads but LWP stopped while doing that. */
2599 if (!can_hardware_single_step ())
2600 delete_reinsert_breakpoints ();
2601
2602 step_over_bkpt = null_ptid;
2603 return 1;
2604 }
2605 else
2606 return 0;
2607}
2608
5544ad89
DJ
2609/* This function is called once per thread. We check the thread's resume
2610 request, which will tell us whether to resume, step, or leave the thread
bd99dc85 2611 stopped; and what signal, if any, it should be sent.
5544ad89 2612
bd99dc85
PA
2613 For threads which we aren't explicitly told otherwise, we preserve
2614 the stepping flag; this is used for stepping over gdbserver-placed
2615 breakpoints.
2616
2617 If pending_flags was set in any thread, we queue any needed
2618 signals, since we won't actually resume. We already have a pending
2619 event to report, so we don't need to preserve any step requests;
2620 they should be re-issued if necessary. */
2621
2622static int
2623linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
5544ad89 2624{
54a0b537 2625 struct lwp_info *lwp;
5544ad89 2626 struct thread_info *thread;
bd99dc85 2627 int step;
d50171e4
PA
2628 int leave_all_stopped = * (int *) arg;
2629 int leave_pending;
5544ad89
DJ
2630
2631 thread = (struct thread_info *) entry;
54a0b537 2632 lwp = get_thread_lwp (thread);
5544ad89 2633
2bd7c093 2634 if (lwp->resume == NULL)
bd99dc85 2635 return 0;
5544ad89 2636
bd99dc85 2637 if (lwp->resume->kind == resume_stop)
5544ad89 2638 {
bd99dc85 2639 if (debug_threads)
d50171e4 2640 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
bd99dc85
PA
2641
2642 if (!lwp->stopped)
2643 {
2644 if (debug_threads)
d50171e4 2645 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
bd99dc85 2646
d50171e4
PA
2647 /* Stop the thread, and wait for the event asynchronously,
2648 through the event loop. */
02fc4de7 2649 send_sigstop (lwp);
bd99dc85
PA
2650 }
2651 else
2652 {
2653 if (debug_threads)
d50171e4
PA
2654 fprintf (stderr, "already stopped LWP %ld\n",
2655 lwpid_of (lwp));
2656
2657 /* The LWP may have been stopped in an internal event that
2658 was not meant to be notified back to GDB (e.g., gdbserver
2659 breakpoint), so we should be reporting a stop event in
2660 this case too. */
2661
2662 /* If the thread already has a pending SIGSTOP, this is a
2663 no-op. Otherwise, something later will presumably resume
2664 the thread and this will cause it to cancel any pending
2665 operation, due to last_resume_kind == resume_stop. If
2666 the thread already has a pending status to report, we
2667 will still report it the next time we wait - see
2668 status_pending_p_callback. */
02fc4de7 2669 send_sigstop (lwp);
bd99dc85 2670 }
32ca6d61 2671
bd99dc85
PA
2672 /* For stop requests, we're done. */
2673 lwp->resume = NULL;
fc7238bb 2674 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
bd99dc85 2675 return 0;
5544ad89
DJ
2676 }
2677
bd99dc85
PA
2678 /* If this thread which is about to be resumed has a pending status,
2679 then don't resume any threads - we can just report the pending
2680 status. Make sure to queue any signals that would otherwise be
2681 sent. In all-stop mode, we do this decision based on if *any*
d50171e4
PA
2682 thread has a pending status. If there's a thread that needs the
2683 step-over-breakpoint dance, then don't resume any other thread
2684 but that particular one. */
2685 leave_pending = (lwp->status_pending_p || leave_all_stopped);
5544ad89 2686
d50171e4 2687 if (!leave_pending)
bd99dc85
PA
2688 {
2689 if (debug_threads)
2690 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
5544ad89 2691
d50171e4 2692 step = (lwp->resume->kind == resume_step);
2acc282a 2693 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
bd99dc85
PA
2694 }
2695 else
2696 {
2697 if (debug_threads)
2698 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
5544ad89 2699
bd99dc85
PA
2700 /* If we have a new signal, enqueue the signal. */
2701 if (lwp->resume->sig != 0)
2702 {
2703 struct pending_signals *p_sig;
2704 p_sig = xmalloc (sizeof (*p_sig));
2705 p_sig->prev = lwp->pending_signals;
2706 p_sig->signal = lwp->resume->sig;
2707 memset (&p_sig->info, 0, sizeof (siginfo_t));
2708
2709 /* If this is the same signal we were previously stopped by,
2710 make sure to queue its siginfo. We can ignore the return
2711 value of ptrace; if it fails, we'll skip
2712 PTRACE_SETSIGINFO. */
2713 if (WIFSTOPPED (lwp->last_status)
2714 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
2715 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
2716
2717 lwp->pending_signals = p_sig;
2718 }
2719 }
5544ad89 2720
fc7238bb 2721 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
bd99dc85 2722 lwp->resume = NULL;
5544ad89 2723 return 0;
0d62e5e8
DJ
2724}
2725
2726static void
2bd7c093 2727linux_resume (struct thread_resume *resume_info, size_t n)
0d62e5e8 2728{
2bd7c093 2729 struct thread_resume_array array = { resume_info, n };
d50171e4
PA
2730 struct lwp_info *need_step_over = NULL;
2731 int any_pending;
2732 int leave_all_stopped;
c6ecbae5 2733
2bd7c093 2734 find_inferior (&all_threads, linux_set_resume_request, &array);
5544ad89 2735
d50171e4
PA
2736 /* If there is a thread which would otherwise be resumed, which has
2737 a pending status, then don't resume any threads - we can just
2738 report the pending status. Make sure to queue any signals that
2739 would otherwise be sent. In non-stop mode, we'll apply this
2740 logic to each thread individually. We consume all pending events
2741 before considering to start a step-over (in all-stop). */
2742 any_pending = 0;
bd99dc85 2743 if (!non_stop)
d50171e4
PA
2744 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
2745
2746 /* If there is a thread which would otherwise be resumed, which is
2747 stopped at a breakpoint that needs stepping over, then don't
2748 resume any threads - have it step over the breakpoint with all
2749 other threads stopped, then resume all threads again. Make sure
2750 to queue any signals that would otherwise be delivered or
2751 queued. */
2752 if (!any_pending && supports_breakpoints ())
2753 need_step_over
2754 = (struct lwp_info *) find_inferior (&all_lwps,
2755 need_step_over_p, NULL);
2756
2757 leave_all_stopped = (need_step_over != NULL || any_pending);
2758
2759 if (debug_threads)
2760 {
2761 if (need_step_over != NULL)
2762 fprintf (stderr, "Not resuming all, need step over\n");
2763 else if (any_pending)
2764 fprintf (stderr,
2765 "Not resuming, all-stop and found "
2766 "an LWP with pending status\n");
2767 else
2768 fprintf (stderr, "Resuming, no pending status or step over needed\n");
2769 }
2770
2771 /* Even if we're leaving threads stopped, queue all signals we'd
2772 otherwise deliver. */
2773 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
2774
2775 if (need_step_over)
2776 start_step_over (need_step_over);
2777}
2778
2779/* This function is called once per thread. We check the thread's
2780 last resume request, which will tell us whether to resume, step, or
2781 leave the thread stopped. Any signal the client requested to be
2782 delivered has already been enqueued at this point.
2783
2784 If any thread that GDB wants running is stopped at an internal
2785 breakpoint that needs stepping over, we start a step-over operation
2786 on that particular thread, and leave all others stopped. */
2787
2788static void
2789proceed_one_lwp (struct inferior_list_entry *entry)
2790{
2791 struct lwp_info *lwp;
8336d594 2792 struct thread_info *thread;
d50171e4
PA
2793 int step;
2794
2795 lwp = (struct lwp_info *) entry;
2796
2797 if (debug_threads)
2798 fprintf (stderr,
2799 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
2800
2801 if (!lwp->stopped)
2802 {
2803 if (debug_threads)
2804 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
2805 return;
2806 }
2807
8336d594
PA
2808 thread = get_lwp_thread (lwp);
2809
02fc4de7
PA
2810 if (thread->last_resume_kind == resume_stop
2811 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
d50171e4
PA
2812 {
2813 if (debug_threads)
02fc4de7
PA
2814 fprintf (stderr, " client wants LWP to remain %ld stopped\n",
2815 lwpid_of (lwp));
d50171e4
PA
2816 return;
2817 }
2818
2819 if (lwp->status_pending_p)
2820 {
2821 if (debug_threads)
2822 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
2823 lwpid_of (lwp));
2824 return;
2825 }
2826
2827 if (lwp->suspended)
2828 {
2829 if (debug_threads)
2830 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
2831 return;
2832 }
2833
02fc4de7
PA
2834 if (thread->last_resume_kind == resume_stop)
2835 {
2836 /* We haven't reported this LWP as stopped yet (otherwise, the
2837 last_status.kind check above would catch it, and we wouldn't
2838 reach here. This LWP may have been momentarily paused by a
2839 stop_all_lwps call while handling for example, another LWP's
2840 step-over. In that case, the pending expected SIGSTOP signal
2841 that was queued at vCont;t handling time will have already
2842 been consumed by wait_for_sigstop, and so we need to requeue
2843 another one here. Note that if the LWP already has a SIGSTOP
2844 pending, this is a no-op. */
2845
2846 if (debug_threads)
2847 fprintf (stderr,
2848 "Client wants LWP %ld to stop. "
2849 "Making sure it has a SIGSTOP pending\n",
2850 lwpid_of (lwp));
2851
2852 send_sigstop (lwp);
2853 }
2854
8336d594 2855 step = thread->last_resume_kind == resume_step;
d50171e4
PA
2856 linux_resume_one_lwp (lwp, step, 0, NULL);
2857}
2858
2859/* When we finish a step-over, set threads running again. If there's
2860 another thread that may need a step-over, now's the time to start
2861 it. Eventually, we'll move all threads past their breakpoints. */
2862
2863static void
2864proceed_all_lwps (void)
2865{
2866 struct lwp_info *need_step_over;
2867
2868 /* If there is a thread which would otherwise be resumed, which is
2869 stopped at a breakpoint that needs stepping over, then don't
2870 resume any threads - have it step over the breakpoint with all
2871 other threads stopped, then resume all threads again. */
2872
2873 if (supports_breakpoints ())
2874 {
2875 need_step_over
2876 = (struct lwp_info *) find_inferior (&all_lwps,
2877 need_step_over_p, NULL);
2878
2879 if (need_step_over != NULL)
2880 {
2881 if (debug_threads)
2882 fprintf (stderr, "proceed_all_lwps: found "
2883 "thread %ld needing a step-over\n",
2884 lwpid_of (need_step_over));
2885
2886 start_step_over (need_step_over);
2887 return;
2888 }
2889 }
5544ad89 2890
d50171e4
PA
2891 if (debug_threads)
2892 fprintf (stderr, "Proceeding, no step-over needed\n");
2893
2894 for_each_inferior (&all_lwps, proceed_one_lwp);
2895}
2896
2897/* Stopped LWPs that the client wanted to be running, that don't have
2898 pending statuses, are set to run again, except for EXCEPT, if not
2899 NULL. This undoes a stop_all_lwps call. */
2900
2901static void
2902unstop_all_lwps (struct lwp_info *except)
2903{
5544ad89
DJ
2904 if (debug_threads)
2905 {
d50171e4
PA
2906 if (except)
2907 fprintf (stderr,
2908 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
5544ad89 2909 else
d50171e4
PA
2910 fprintf (stderr,
2911 "unstopping all lwps\n");
5544ad89
DJ
2912 }
2913
d50171e4
PA
2914 /* Make sure proceed_one_lwp doesn't try to resume this thread. */
2915 if (except != NULL)
2916 ++except->suspended;
2917
2918 for_each_inferior (&all_lwps, proceed_one_lwp);
2919
2920 if (except != NULL)
2921 --except->suspended;
0d62e5e8
DJ
2922}
2923
2924#ifdef HAVE_LINUX_USRREGS
da6d8c04
DJ
2925
2926int
0a30fbc4 2927register_addr (int regnum)
da6d8c04
DJ
2928{
2929 int addr;
2930
2ec06d2e 2931 if (regnum < 0 || regnum >= the_low_target.num_regs)
da6d8c04
DJ
2932 error ("Invalid register number %d.", regnum);
2933
2ec06d2e 2934 addr = the_low_target.regmap[regnum];
da6d8c04
DJ
2935
2936 return addr;
2937}
2938
58caa3dc 2939/* Fetch one register. */
da6d8c04 2940static void
442ea881 2941fetch_register (struct regcache *regcache, int regno)
da6d8c04
DJ
2942{
2943 CORE_ADDR regaddr;
48d93c75 2944 int i, size;
0d62e5e8 2945 char *buf;
95954743 2946 int pid;
da6d8c04 2947
2ec06d2e 2948 if (regno >= the_low_target.num_regs)
0a30fbc4 2949 return;
2ec06d2e 2950 if ((*the_low_target.cannot_fetch_register) (regno))
0a30fbc4 2951 return;
da6d8c04 2952
0a30fbc4
DJ
2953 regaddr = register_addr (regno);
2954 if (regaddr == -1)
2955 return;
95954743
PA
2956
2957 pid = lwpid_of (get_thread_lwp (current_inferior));
1b3f6016
PA
2958 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2959 & - sizeof (PTRACE_XFER_TYPE));
48d93c75
UW
2960 buf = alloca (size);
2961 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
da6d8c04
DJ
2962 {
2963 errno = 0;
0d62e5e8 2964 *(PTRACE_XFER_TYPE *) (buf + i) =
14ce3065
DE
2965 ptrace (PTRACE_PEEKUSER, pid,
2966 /* Coerce to a uintptr_t first to avoid potential gcc warning
2967 of coercing an 8 byte integer to a 4 byte pointer. */
2968 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
da6d8c04
DJ
2969 regaddr += sizeof (PTRACE_XFER_TYPE);
2970 if (errno != 0)
f52cd8cd 2971 error ("reading register %d: %s", regno, strerror (errno));
da6d8c04 2972 }
ee1a7ae4
UW
2973
2974 if (the_low_target.supply_ptrace_register)
442ea881 2975 the_low_target.supply_ptrace_register (regcache, regno, buf);
5a1f5858 2976 else
442ea881 2977 supply_register (regcache, regno, buf);
da6d8c04
DJ
2978}
2979
2980/* Fetch all registers, or just one, from the child process. */
58caa3dc 2981static void
442ea881 2982usr_fetch_inferior_registers (struct regcache *regcache, int regno)
da6d8c04 2983{
4463ce24 2984 if (regno == -1)
2ec06d2e 2985 for (regno = 0; regno < the_low_target.num_regs; regno++)
442ea881 2986 fetch_register (regcache, regno);
da6d8c04 2987 else
442ea881 2988 fetch_register (regcache, regno);
da6d8c04
DJ
2989}
2990
2991/* Store our register values back into the inferior.
2992 If REGNO is -1, do this for all registers.
2993 Otherwise, REGNO specifies which register (so we can save time). */
58caa3dc 2994static void
442ea881 2995usr_store_inferior_registers (struct regcache *regcache, int regno)
da6d8c04
DJ
2996{
2997 CORE_ADDR regaddr;
48d93c75 2998 int i, size;
0d62e5e8 2999 char *buf;
55ac2b99 3000 int pid;
da6d8c04
DJ
3001
3002 if (regno >= 0)
3003 {
2ec06d2e 3004 if (regno >= the_low_target.num_regs)
0a30fbc4
DJ
3005 return;
3006
bc1e36ca 3007 if ((*the_low_target.cannot_store_register) (regno) == 1)
0a30fbc4
DJ
3008 return;
3009
3010 regaddr = register_addr (regno);
3011 if (regaddr == -1)
da6d8c04 3012 return;
da6d8c04 3013 errno = 0;
48d93c75
UW
3014 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3015 & - sizeof (PTRACE_XFER_TYPE);
3016 buf = alloca (size);
3017 memset (buf, 0, size);
ee1a7ae4
UW
3018
3019 if (the_low_target.collect_ptrace_register)
442ea881 3020 the_low_target.collect_ptrace_register (regcache, regno, buf);
5a1f5858 3021 else
442ea881 3022 collect_register (regcache, regno, buf);
ee1a7ae4 3023
95954743 3024 pid = lwpid_of (get_thread_lwp (current_inferior));
48d93c75 3025 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
da6d8c04 3026 {
0a30fbc4 3027 errno = 0;
14ce3065
DE
3028 ptrace (PTRACE_POKEUSER, pid,
3029 /* Coerce to a uintptr_t first to avoid potential gcc warning
3030 about coercing an 8 byte integer to a 4 byte pointer. */
3031 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
3032 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
da6d8c04
DJ
3033 if (errno != 0)
3034 {
1b3f6016
PA
3035 /* At this point, ESRCH should mean the process is
3036 already gone, in which case we simply ignore attempts
3037 to change its registers. See also the related
3038 comment in linux_resume_one_lwp. */
3221518c
UW
3039 if (errno == ESRCH)
3040 return;
3041
bc1e36ca 3042 if ((*the_low_target.cannot_store_register) (regno) == 0)
f52cd8cd 3043 error ("writing register %d: %s", regno, strerror (errno));
da6d8c04 3044 }
2ff29de4 3045 regaddr += sizeof (PTRACE_XFER_TYPE);
da6d8c04 3046 }
da6d8c04
DJ
3047 }
3048 else
2ec06d2e 3049 for (regno = 0; regno < the_low_target.num_regs; regno++)
442ea881 3050 usr_store_inferior_registers (regcache, regno);
da6d8c04 3051}
58caa3dc
DJ
3052#endif /* HAVE_LINUX_USRREGS */
3053
3054
3055
3056#ifdef HAVE_LINUX_REGSETS
3057
3058static int
442ea881 3059regsets_fetch_inferior_registers (struct regcache *regcache)
58caa3dc
DJ
3060{
3061 struct regset_info *regset;
e9d25b98 3062 int saw_general_regs = 0;
95954743 3063 int pid;
1570b33e 3064 struct iovec iov;
58caa3dc
DJ
3065
3066 regset = target_regsets;
3067
95954743 3068 pid = lwpid_of (get_thread_lwp (current_inferior));
58caa3dc
DJ
3069 while (regset->size >= 0)
3070 {
1570b33e
L
3071 void *buf, *data;
3072 int nt_type, res;
58caa3dc 3073
52fa2412 3074 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
58caa3dc
DJ
3075 {
3076 regset ++;
3077 continue;
3078 }
3079
bca929d3 3080 buf = xmalloc (regset->size);
1570b33e
L
3081
3082 nt_type = regset->nt_type;
3083 if (nt_type)
3084 {
3085 iov.iov_base = buf;
3086 iov.iov_len = regset->size;
3087 data = (void *) &iov;
3088 }
3089 else
3090 data = buf;
3091
dfb64f85 3092#ifndef __sparc__
1570b33e 3093 res = ptrace (regset->get_request, pid, nt_type, data);
dfb64f85 3094#else
1570b33e 3095 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 3096#endif
58caa3dc
DJ
3097 if (res < 0)
3098 {
3099 if (errno == EIO)
3100 {
52fa2412
UW
3101 /* If we get EIO on a regset, do not try it again for
3102 this process. */
3103 disabled_regsets[regset - target_regsets] = 1;
fdeb2a12 3104 free (buf);
52fa2412 3105 continue;
58caa3dc
DJ
3106 }
3107 else
3108 {
0d62e5e8 3109 char s[256];
95954743
PA
3110 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3111 pid);
0d62e5e8 3112 perror (s);
58caa3dc
DJ
3113 }
3114 }
e9d25b98
DJ
3115 else if (regset->type == GENERAL_REGS)
3116 saw_general_regs = 1;
442ea881 3117 regset->store_function (regcache, buf);
58caa3dc 3118 regset ++;
fdeb2a12 3119 free (buf);
58caa3dc 3120 }
e9d25b98
DJ
3121 if (saw_general_regs)
3122 return 0;
3123 else
3124 return 1;
58caa3dc
DJ
3125}
3126
3127static int
442ea881 3128regsets_store_inferior_registers (struct regcache *regcache)
58caa3dc
DJ
3129{
3130 struct regset_info *regset;
e9d25b98 3131 int saw_general_regs = 0;
95954743 3132 int pid;
1570b33e 3133 struct iovec iov;
58caa3dc
DJ
3134
3135 regset = target_regsets;
3136
95954743 3137 pid = lwpid_of (get_thread_lwp (current_inferior));
58caa3dc
DJ
3138 while (regset->size >= 0)
3139 {
1570b33e
L
3140 void *buf, *data;
3141 int nt_type, res;
58caa3dc 3142
52fa2412 3143 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
58caa3dc
DJ
3144 {
3145 regset ++;
3146 continue;
3147 }
3148
bca929d3 3149 buf = xmalloc (regset->size);
545587ee
DJ
3150
3151 /* First fill the buffer with the current register set contents,
3152 in case there are any items in the kernel's regset that are
3153 not in gdbserver's regcache. */
1570b33e
L
3154
3155 nt_type = regset->nt_type;
3156 if (nt_type)
3157 {
3158 iov.iov_base = buf;
3159 iov.iov_len = regset->size;
3160 data = (void *) &iov;
3161 }
3162 else
3163 data = buf;
3164
dfb64f85 3165#ifndef __sparc__
1570b33e 3166 res = ptrace (regset->get_request, pid, nt_type, data);
dfb64f85 3167#else
1570b33e 3168 res = ptrace (regset->get_request, pid, &iov, data);
dfb64f85 3169#endif
545587ee
DJ
3170
3171 if (res == 0)
3172 {
3173 /* Then overlay our cached registers on that. */
442ea881 3174 regset->fill_function (regcache, buf);
545587ee
DJ
3175
3176 /* Only now do we write the register set. */
dfb64f85 3177#ifndef __sparc__
1570b33e 3178 res = ptrace (regset->set_request, pid, nt_type, data);
dfb64f85 3179#else
1570b33e 3180 res = ptrace (regset->set_request, pid, data, nt_type);
dfb64f85 3181#endif
545587ee
DJ
3182 }
3183
58caa3dc
DJ
3184 if (res < 0)
3185 {
3186 if (errno == EIO)
3187 {
52fa2412
UW
3188 /* If we get EIO on a regset, do not try it again for
3189 this process. */
3190 disabled_regsets[regset - target_regsets] = 1;
fdeb2a12 3191 free (buf);
52fa2412 3192 continue;
58caa3dc 3193 }
3221518c
UW
3194 else if (errno == ESRCH)
3195 {
1b3f6016
PA
3196 /* At this point, ESRCH should mean the process is
3197 already gone, in which case we simply ignore attempts
3198 to change its registers. See also the related
3199 comment in linux_resume_one_lwp. */
fdeb2a12 3200 free (buf);
3221518c
UW
3201 return 0;
3202 }
58caa3dc
DJ
3203 else
3204 {
ce3a066d 3205 perror ("Warning: ptrace(regsets_store_inferior_registers)");
58caa3dc
DJ
3206 }
3207 }
e9d25b98
DJ
3208 else if (regset->type == GENERAL_REGS)
3209 saw_general_regs = 1;
58caa3dc 3210 regset ++;
09ec9b38 3211 free (buf);
58caa3dc 3212 }
e9d25b98
DJ
3213 if (saw_general_regs)
3214 return 0;
3215 else
3216 return 1;
ce3a066d 3217 return 0;
58caa3dc
DJ
3218}
3219
3220#endif /* HAVE_LINUX_REGSETS */
3221
3222
/* Fetch registers into REGCACHE: try the regset interface first, and
   fall back to PTRACE_PEEKUSER when regsets did not cover the general
   registers (or are unavailable on this configuration).  */

void
linux_fetch_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  if (regsets_fetch_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_fetch_inferior_registers (regcache, regno);
#endif
}
3234
/* Store registers from REGCACHE: try the regset interface first, and
   fall back to PTRACE_POKEUSER when regsets did not cover the general
   registers (or are unavailable on this configuration).  */

void
linux_store_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  if (regsets_store_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_store_inferior_registers (regcache, regno);
#endif
}
3246
da6d8c04 3247
da6d8c04
DJ
3248/* Copy LEN bytes from inferior's memory starting at MEMADDR
3249 to debugger memory starting at MYADDR. */
3250
c3e735a6 3251static int
f450004a 3252linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
da6d8c04
DJ
3253{
3254 register int i;
3255 /* Round starting address down to longword boundary. */
3256 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3257 /* Round ending address up; get number of longwords that makes. */
aa691b87
RM
3258 register int count
3259 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
da6d8c04
DJ
3260 / sizeof (PTRACE_XFER_TYPE);
3261 /* Allocate buffer of that many longwords. */
aa691b87 3262 register PTRACE_XFER_TYPE *buffer
da6d8c04 3263 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
fd462a61
DJ
3264 int fd;
3265 char filename[64];
95954743 3266 int pid = lwpid_of (get_thread_lwp (current_inferior));
fd462a61
DJ
3267
3268 /* Try using /proc. Don't bother for one word. */
3269 if (len >= 3 * sizeof (long))
3270 {
3271 /* We could keep this file open and cache it - possibly one per
3272 thread. That requires some juggling, but is even faster. */
95954743 3273 sprintf (filename, "/proc/%d/mem", pid);
fd462a61
DJ
3274 fd = open (filename, O_RDONLY | O_LARGEFILE);
3275 if (fd == -1)
3276 goto no_proc;
3277
3278 /* If pread64 is available, use it. It's faster if the kernel
3279 supports it (only one syscall), and it's 64-bit safe even on
3280 32-bit platforms (for instance, SPARC debugging a SPARC64
3281 application). */
3282#ifdef HAVE_PREAD64
3283 if (pread64 (fd, myaddr, len, memaddr) != len)
3284#else
1de1badb 3285 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
fd462a61
DJ
3286#endif
3287 {
3288 close (fd);
3289 goto no_proc;
3290 }
3291
3292 close (fd);
3293 return 0;
3294 }
da6d8c04 3295
fd462a61 3296 no_proc:
da6d8c04
DJ
3297 /* Read all the longwords */
3298 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3299 {
c3e735a6 3300 errno = 0;
14ce3065
DE
3301 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3302 about coercing an 8 byte integer to a 4 byte pointer. */
3303 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
3304 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
c3e735a6
DJ
3305 if (errno)
3306 return errno;
da6d8c04
DJ
3307 }
3308
3309 /* Copy appropriate bytes out of the buffer. */
1b3f6016
PA
3310 memcpy (myaddr,
3311 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
3312 len);
c3e735a6
DJ
3313
3314 return 0;
da6d8c04
DJ
3315}
3316
93ae6fdc
PA
3317/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
3318 memory at MEMADDR. On failure (cannot write to the inferior)
da6d8c04
DJ
3319 returns the value of errno. */
3320
ce3a066d 3321static int
f450004a 3322linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
da6d8c04
DJ
3323{
3324 register int i;
3325 /* Round starting address down to longword boundary. */
3326 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3327 /* Round ending address up; get number of longwords that makes. */
3328 register int count
3329 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
3330 /* Allocate buffer of that many longwords. */
3331 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
95954743 3332 int pid = lwpid_of (get_thread_lwp (current_inferior));
da6d8c04 3333
0d62e5e8
DJ
3334 if (debug_threads)
3335 {
58d6951d
DJ
3336 /* Dump up to four bytes. */
3337 unsigned int val = * (unsigned int *) myaddr;
3338 if (len == 1)
3339 val = val & 0xff;
3340 else if (len == 2)
3341 val = val & 0xffff;
3342 else if (len == 3)
3343 val = val & 0xffffff;
3344 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
3345 val, (long)memaddr);
0d62e5e8
DJ
3346 }
3347
da6d8c04
DJ
3348 /* Fill start and end extra bytes of buffer with existing memory data. */
3349
93ae6fdc 3350 errno = 0;
14ce3065
DE
3351 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3352 about coercing an 8 byte integer to a 4 byte pointer. */
3353 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
3354 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
93ae6fdc
PA
3355 if (errno)
3356 return errno;
da6d8c04
DJ
3357
3358 if (count > 1)
3359 {
93ae6fdc 3360 errno = 0;
da6d8c04 3361 buffer[count - 1]
95954743 3362 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
3363 /* Coerce to a uintptr_t first to avoid potential gcc warning
3364 about coercing an 8 byte integer to a 4 byte pointer. */
3365 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
3366 * sizeof (PTRACE_XFER_TYPE)),
d844cde6 3367 0);
93ae6fdc
PA
3368 if (errno)
3369 return errno;
da6d8c04
DJ
3370 }
3371
93ae6fdc 3372 /* Copy data to be written over corresponding part of buffer. */
da6d8c04
DJ
3373
3374 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
3375
3376 /* Write the entire buffer. */
3377
3378 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3379 {
3380 errno = 0;
14ce3065
DE
3381 ptrace (PTRACE_POKETEXT, pid,
3382 /* Coerce to a uintptr_t first to avoid potential gcc warning
3383 about coercing an 8 byte integer to a 4 byte pointer. */
3384 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
3385 (PTRACE_ARG4_TYPE) buffer[i]);
da6d8c04
DJ
3386 if (errno)
3387 return errno;
3388 }
3389
3390 return 0;
3391}
2f2893d9 3392
6076632b 3393/* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
24a09b5f
DJ
3394static int linux_supports_tracefork_flag;
3395
1e7fc18c
PA
3396static void
3397linux_enable_event_reporting (int pid)
3398{
3399 if (!linux_supports_tracefork_flag)
3400 return;
3401
3402 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
3403}
3404
51c2684e 3405/* Helper functions for linux_test_for_tracefork, called via clone (). */
24a09b5f 3406
51c2684e
DJ
3407static int
3408linux_tracefork_grandchild (void *arg)
3409{
3410 _exit (0);
3411}
3412
7407e2de
AS
3413#define STACK_SIZE 4096
3414
51c2684e
DJ
3415static int
3416linux_tracefork_child (void *arg)
24a09b5f
DJ
3417{
3418 ptrace (PTRACE_TRACEME, 0, 0, 0);
3419 kill (getpid (), SIGSTOP);
e4b7f41c
JK
3420
3421#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3422
3423 if (fork () == 0)
3424 linux_tracefork_grandchild (NULL);
3425
3426#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3427
7407e2de
AS
3428#ifdef __ia64__
3429 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
3430 CLONE_VM | SIGCHLD, NULL);
3431#else
3432 clone (linux_tracefork_grandchild, arg + STACK_SIZE,
3433 CLONE_VM | SIGCHLD, NULL);
3434#endif
e4b7f41c
JK
3435
3436#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3437
24a09b5f
DJ
3438 _exit (0);
3439}
3440
24a09b5f
DJ
3441/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
3442 sure that we can enable the option, and that it had the desired
3443 effect. */
3444
3445static void
3446linux_test_for_tracefork (void)
3447{
3448 int child_pid, ret, status;
3449 long second_pid;
e4b7f41c 3450#if defined(__UCLIBC__) && defined(HAS_NOMMU)
bca929d3 3451 char *stack = xmalloc (STACK_SIZE * 4);
e4b7f41c 3452#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
24a09b5f
DJ
3453
3454 linux_supports_tracefork_flag = 0;
3455
e4b7f41c
JK
3456#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3457
3458 child_pid = fork ();
3459 if (child_pid == 0)
3460 linux_tracefork_child (NULL);
3461
3462#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3463
51c2684e 3464 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
7407e2de
AS
3465#ifdef __ia64__
3466 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
3467 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
e4b7f41c 3468#else /* !__ia64__ */
7407e2de
AS
3469 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
3470 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
e4b7f41c
JK
3471#endif /* !__ia64__ */
3472
3473#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3474
24a09b5f 3475 if (child_pid == -1)
51c2684e 3476 perror_with_name ("clone");
24a09b5f
DJ
3477
3478 ret = my_waitpid (child_pid, &status, 0);
3479 if (ret == -1)
3480 perror_with_name ("waitpid");
3481 else if (ret != child_pid)
3482 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
3483 if (! WIFSTOPPED (status))
3484 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
3485
14ce3065
DE
3486 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
3487 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
24a09b5f
DJ
3488 if (ret != 0)
3489 {
3490 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3491 if (ret != 0)
3492 {
3493 warning ("linux_test_for_tracefork: failed to kill child");
3494 return;
3495 }
3496
3497 ret = my_waitpid (child_pid, &status, 0);
3498 if (ret != child_pid)
3499 warning ("linux_test_for_tracefork: failed to wait for killed child");
3500 else if (!WIFSIGNALED (status))
3501 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
3502 "killed child", status);
3503
3504 return;
3505 }
3506
3507 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
3508 if (ret != 0)
3509 warning ("linux_test_for_tracefork: failed to resume child");
3510
3511 ret = my_waitpid (child_pid, &status, 0);
3512
3513 if (ret == child_pid && WIFSTOPPED (status)
3514 && status >> 16 == PTRACE_EVENT_FORK)
3515 {
3516 second_pid = 0;
3517 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
3518 if (ret == 0 && second_pid != 0)
3519 {
3520 int second_status;
3521
3522 linux_supports_tracefork_flag = 1;
3523 my_waitpid (second_pid, &second_status, 0);
3524 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
3525 if (ret != 0)
3526 warning ("linux_test_for_tracefork: failed to kill second child");
3527 my_waitpid (second_pid, &status, 0);
3528 }
3529 }
3530 else
3531 warning ("linux_test_for_tracefork: unexpected result from waitpid "
3532 "(%d, status 0x%x)", ret, status);
3533
3534 do
3535 {
3536 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3537 if (ret != 0)
3538 warning ("linux_test_for_tracefork: failed to kill child");
3539 my_waitpid (child_pid, &status, 0);
3540 }
3541 while (WIFSTOPPED (status));
51c2684e 3542
e4b7f41c 3543#if defined(__UCLIBC__) && defined(HAS_NOMMU)
51c2684e 3544 free (stack);
e4b7f41c 3545#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
24a09b5f
DJ
3546}
3547
3548
2f2893d9
DJ
/* Initialize libthread_db support for the current process, if it is
   not already active.  */

static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  if (proc->private->thread_db != NULL)
    return;

  /* If the kernel supports tracing forks then it also supports tracing
     clones, and then we don't need to use the magic thread event breakpoint
     to learn about threads.  */
  thread_db_init (!linux_supports_tracefork_flag);
#endif
}
3564
e5379b03 3565static void
ef57601b 3566linux_request_interrupt (void)
e5379b03 3567{
a1928bad 3568 extern unsigned long signal_pid;
e5379b03 3569
95954743
PA
3570 if (!ptid_equal (cont_thread, null_ptid)
3571 && !ptid_equal (cont_thread, minus_one_ptid))
e5379b03 3572 {
54a0b537 3573 struct lwp_info *lwp;
bd99dc85 3574 int lwpid;
e5379b03 3575
54a0b537 3576 lwp = get_thread_lwp (current_inferior);
bd99dc85
PA
3577 lwpid = lwpid_of (lwp);
3578 kill_lwp (lwpid, SIGINT);
e5379b03
DJ
3579 }
3580 else
ef57601b 3581 kill_lwp (signal_pid, SIGINT);
e5379b03
DJ
3582}
3583
aa691b87
RM
3584/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
3585 to debugger memory starting at MYADDR. */
3586
3587static int
f450004a 3588linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
aa691b87
RM
3589{
3590 char filename[PATH_MAX];
3591 int fd, n;
95954743 3592 int pid = lwpid_of (get_thread_lwp (current_inferior));
aa691b87 3593
95954743 3594 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
3595
3596 fd = open (filename, O_RDONLY);
3597 if (fd < 0)
3598 return -1;
3599
3600 if (offset != (CORE_ADDR) 0
3601 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
3602 n = -1;
3603 else
3604 n = read (fd, myaddr, len);
3605
3606 close (fd);
3607
3608 return n;
3609}
3610
d993e290
PA
3611/* These breakpoint and watchpoint related wrapper functions simply
3612 pass on the function call if the target has registered a
3613 corresponding function. */
e013ee27
OF
3614
3615static int
d993e290 3616linux_insert_point (char type, CORE_ADDR addr, int len)
e013ee27 3617{
d993e290
PA
3618 if (the_low_target.insert_point != NULL)
3619 return the_low_target.insert_point (type, addr, len);
e013ee27
OF
3620 else
3621 /* Unsupported (see target.h). */
3622 return 1;
3623}
3624
3625static int
d993e290 3626linux_remove_point (char type, CORE_ADDR addr, int len)
e013ee27 3627{
d993e290
PA
3628 if (the_low_target.remove_point != NULL)
3629 return the_low_target.remove_point (type, addr, len);
e013ee27
OF
3630 else
3631 /* Unsupported (see target.h). */
3632 return 1;
3633}
3634
3635static int
3636linux_stopped_by_watchpoint (void)
3637{
c3adc08c
PA
3638 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3639
3640 return lwp->stopped_by_watchpoint;
e013ee27
OF
3641}
3642
3643static CORE_ADDR
3644linux_stopped_data_address (void)
3645{
c3adc08c
PA
3646 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3647
3648 return lwp->stopped_data_address;
e013ee27
OF
3649}
3650
42c81e2a 3651#if defined(__UCLIBC__) && defined(HAS_NOMMU)
52fb6437
NS
3652#if defined(__mcoldfire__)
3653/* These should really be defined in the kernel's ptrace.h header. */
3654#define PT_TEXT_ADDR 49*4
3655#define PT_DATA_ADDR 50*4
3656#define PT_TEXT_END_ADDR 51*4
3657#endif
3658
3659/* Under uClinux, programs are loaded at non-zero offsets, which we need
3660 to tell gdb about. */
3661
3662static int
3663linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
3664{
3665#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
3666 unsigned long text, text_end, data;
bd99dc85 3667 int pid = lwpid_of (get_thread_lwp (current_inferior));
52fb6437
NS
3668
3669 errno = 0;
3670
3671 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
3672 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
3673 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
3674
3675 if (errno == 0)
3676 {
3677 /* Both text and data offsets produced at compile-time (and so
1b3f6016
PA
3678 used by gdb) are relative to the beginning of the program,
3679 with the data segment immediately following the text segment.
3680 However, the actual runtime layout in memory may put the data
3681 somewhere else, so when we send gdb a data base-address, we
3682 use the real data base address and subtract the compile-time
3683 data base-address from it (which is just the length of the
3684 text segment). BSS immediately follows data in both
3685 cases. */
52fb6437
NS
3686 *text_p = text;
3687 *data_p = data - (text_end - text);
1b3f6016 3688
52fb6437
NS
3689 return 1;
3690 }
3691#endif
3692 return 0;
3693}
3694#endif
3695
dc146f7c
VP
/* qsort comparison callback: order two ints ascending.  Fixed: the
   previous "a - b" subtraction overflows (undefined behavior) when the
   operands have opposite signs and large magnitude; comparing directly
   yields -1/0/1 and is always safe.  */

static int
compare_ints (const void *xa, const void *xb)
{
  int a = *(const int *) xa;
  int b = *(const int *) xb;

  return (a > b) - (a < b);
}
3704
/* Compact the sorted, non-empty range [B, E) in place, dropping
   adjacent duplicates.  Returns one past the last kept element.  */

static int *
unique (int *b, int *e)
{
  int *out = b;   /* Last element kept so far.  */
  int *in;

  for (in = b + 1; in != e; in++)
    if (*in != *out)
      *++out = *in;

  return out + 1;
}
3714
3715/* Given PID, iterates over all threads in that process.
3716
3717 Information about each thread, in a format suitable for qXfer:osdata:thread
3718 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
3719 initialized, and the caller is responsible for finishing and appending '\0'
3720 to it.
3721
3722 The list of cores that threads are running on is assigned to *CORES, if it
3723 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
3724 should free *CORES. */
3725
3726static void
3727list_threads (int pid, struct buffer *buffer, char **cores)
3728{
3729 int count = 0;
3730 int allocated = 10;
3731 int *core_numbers = xmalloc (sizeof (int) * allocated);
3732 char pathname[128];
3733 DIR *dir;
3734 struct dirent *dp;
3735 struct stat statbuf;
3736
3737 sprintf (pathname, "/proc/%d/task", pid);
3738 if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
3739 {
3740 dir = opendir (pathname);
3741 if (!dir)
3742 {
3743 free (core_numbers);
3744 return;
3745 }
3746
3747 while ((dp = readdir (dir)) != NULL)
3748 {
3749 unsigned long lwp = strtoul (dp->d_name, NULL, 10);
3750
3751 if (lwp != 0)
3752 {
3753 unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));
3754
3755 if (core != -1)
3756 {
3757 char s[sizeof ("4294967295")];
3758 sprintf (s, "%u", core);
3759
3760 if (count == allocated)
3761 {
3762 allocated *= 2;
3763 core_numbers = realloc (core_numbers,
3764 sizeof (int) * allocated);
3765 }
3766 core_numbers[count++] = core;
3767 if (buffer)
3768 buffer_xml_printf (buffer,
3769 "<item>"
3770 "<column name=\"pid\">%d</column>"
3771 "<column name=\"tid\">%s</column>"
3772 "<column name=\"core\">%s</column>"
3773 "</item>", pid, dp->d_name, s);
3774 }
3775 else
3776 {
3777 if (buffer)
3778 buffer_xml_printf (buffer,
3779 "<item>"
3780 "<column name=\"pid\">%d</column>"
3781 "<column name=\"tid\">%s</column>"
3782 "</item>", pid, dp->d_name);
3783 }
3784 }
3785 }
3786 }
3787
3788 if (cores)
3789 {
3790 *cores = NULL;
3791 if (count > 0)
3792 {
3793 struct buffer buffer2;
3794 int *b;
3795 int *e;
3796 qsort (core_numbers, count, sizeof (int), compare_ints);
3797
3798 /* Remove duplicates. */
3799 b = core_numbers;
3800 e = unique (b, core_numbers + count);
3801
3802 buffer_init (&buffer2);
3803
3804 for (b = core_numbers; b != e; ++b)
3805 {
3806 char number[sizeof ("4294967295")];
3807 sprintf (number, "%u", *b);
3808 buffer_xml_printf (&buffer2, "%s%s",
3809 (b == core_numbers) ? "" : ",", number);
3810 }
3811 buffer_grow_str0 (&buffer2, "");
3812
3813 *cores = buffer_finish (&buffer2);
3814 }
3815 }
3816 free (core_numbers);
3817}
3818
/* Append one qXfer:osdata:processes <item> for PID (owned by USERNAME)
   to BUFFER, with its command line read from /proc/PID/cmdline and the
   set of cores its threads run on.  Processes whose cmdline cannot be
   read or is empty are skipped.  */

static void
show_process (int pid, const char *username, struct buffer *buffer)
{
  char pathname[128];
  FILE *f;
  char cmd[MAXPATHLEN + 1];

  sprintf (pathname, "/proc/%d/cmdline", pid);

  f = fopen (pathname, "r");
  if (f == NULL)
    return;

  size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
  if (len > 0)
    {
      char *cores = NULL;
      size_t i;

      /* cmdline separates arguments with NULs; flatten to spaces.  */
      for (i = 0; i < len; i++)
	if (cmd[i] == '\0')
	  cmd[i] = ' ';
      cmd[len] = '\0';

      buffer_xml_printf (buffer,
			 "<item>"
			 "<column name=\"pid\">%d</column>"
			 "<column name=\"user\">%s</column>"
			 "<column name=\"command\">%s</column>",
			 pid,
			 username,
			 cmd);

      /* This only collects core numbers, and does not print threads.  */
      list_threads (pid, NULL, &cores);

      if (cores)
	{
	  buffer_xml_printf (buffer,
			     "<column name=\"cores\">%s</column>", cores);
	  free (cores);
	}

      buffer_xml_printf (buffer, "</item>");
    }
  fclose (f);
}
3864
07e059b5
VP
3865static int
3866linux_qxfer_osdata (const char *annex,
1b3f6016
PA
3867 unsigned char *readbuf, unsigned const char *writebuf,
3868 CORE_ADDR offset, int len)
07e059b5
VP
3869{
3870 /* We make the process list snapshot when the object starts to be
3871 read. */
3872 static const char *buf;
3873 static long len_avail = -1;
3874 static struct buffer buffer;
dc146f7c
VP
3875 int processes = 0;
3876 int threads = 0;
07e059b5
VP
3877
3878 DIR *dirp;
3879
dc146f7c
VP
3880 if (strcmp (annex, "processes") == 0)
3881 processes = 1;
3882 else if (strcmp (annex, "threads") == 0)
3883 threads = 1;
3884 else
07e059b5
VP
3885 return 0;
3886
3887 if (!readbuf || writebuf)
3888 return 0;
3889
3890 if (offset == 0)
3891 {
3892 if (len_avail != -1 && len_avail != 0)
3893 buffer_free (&buffer);
3894 len_avail = 0;
3895 buf = NULL;
3896 buffer_init (&buffer);
dc146f7c
VP
3897 if (processes)
3898 buffer_grow_str (&buffer, "<osdata type=\"processes\">");
3899 else if (threads)
3900 buffer_grow_str (&buffer, "<osdata type=\"threads\">");
07e059b5
VP
3901
3902 dirp = opendir ("/proc");
3903 if (dirp)
3904 {
1b3f6016
PA
3905 struct dirent *dp;
3906 while ((dp = readdir (dirp)) != NULL)
3907 {
3908 struct stat statbuf;
3909 char procentry[sizeof ("/proc/4294967295")];
3910
3911 if (!isdigit (dp->d_name[0])
3912 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
3913 continue;
3914
3915 sprintf (procentry, "/proc/%s", dp->d_name);
3916 if (stat (procentry, &statbuf) == 0
3917 && S_ISDIR (statbuf.st_mode))
3918 {
dc146f7c 3919 int pid = (int) strtoul (dp->d_name, NULL, 10);
1b3f6016 3920
dc146f7c 3921 if (processes)
1b3f6016 3922 {
dc146f7c
VP
3923 struct passwd *entry = getpwuid (statbuf.st_uid);
3924 show_process (pid, entry ? entry->pw_name : "?", &buffer);
3925 }
3926 else if (threads)
3927 {
3928 list_threads (pid, &buffer, NULL);
1b3f6016
PA
3929 }
3930 }
3931 }
07e059b5 3932
1b3f6016 3933 closedir (dirp);
07e059b5
VP
3934 }
3935 buffer_grow_str0 (&buffer, "</osdata>\n");
3936 buf = buffer_finish (&buffer);
3937 len_avail = strlen (buf);
3938 }
3939
3940 if (offset >= len_avail)
3941 {
3942 /* Done. Get rid of the data. */
3943 buffer_free (&buffer);
3944 buf = NULL;
3945 len_avail = 0;
3946 return 0;
3947 }
3948
3949 if (len > len_avail - offset)
3950 len = len_avail - offset;
3951 memcpy (readbuf, buf + offset, len);
3952
3953 return len;
3954}
3955
d0722149
DE
3956/* Convert a native/host siginfo object, into/from the siginfo in the
3957 layout of the inferiors' architecture. */
3958
3959static void
3960siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
3961{
3962 int done = 0;
3963
3964 if (the_low_target.siginfo_fixup != NULL)
3965 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
3966
3967 /* If there was no callback, or the callback didn't do anything,
3968 then just do a straight memcpy. */
3969 if (!done)
3970 {
3971 if (direction == 1)
3972 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3973 else
3974 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3975 }
3976}
3977
4aa995e1
PA
3978static int
3979linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
3980 unsigned const char *writebuf, CORE_ADDR offset, int len)
3981{
d0722149 3982 int pid;
4aa995e1 3983 struct siginfo siginfo;
d0722149 3984 char inf_siginfo[sizeof (struct siginfo)];
4aa995e1
PA
3985
3986 if (current_inferior == NULL)
3987 return -1;
3988
bd99dc85 3989 pid = lwpid_of (get_thread_lwp (current_inferior));
4aa995e1
PA
3990
3991 if (debug_threads)
d0722149 3992 fprintf (stderr, "%s siginfo for lwp %d.\n",
4aa995e1
PA
3993 readbuf != NULL ? "Reading" : "Writing",
3994 pid);
3995
3996 if (offset > sizeof (siginfo))
3997 return -1;
3998
3999 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
4000 return -1;
4001
d0722149
DE
4002 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4003 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4004 inferior with a 64-bit GDBSERVER should look the same as debugging it
4005 with a 32-bit GDBSERVER, we need to convert it. */
4006 siginfo_fixup (&siginfo, inf_siginfo, 0);
4007
4aa995e1
PA
4008 if (offset + len > sizeof (siginfo))
4009 len = sizeof (siginfo) - offset;
4010
4011 if (readbuf != NULL)
d0722149 4012 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
4013 else
4014 {
d0722149
DE
4015 memcpy (inf_siginfo + offset, writebuf, len);
4016
4017 /* Convert back to ptrace layout before flushing it out. */
4018 siginfo_fixup (&siginfo, inf_siginfo, 1);
4019
4aa995e1
PA
4020 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
4021 return -1;
4022 }
4023
4024 return len;
4025}
4026
bd99dc85
PA
4027/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4028 so we notice when children change state; as the handler for the
4029 sigsuspend in my_waitpid. */
4030
4031static void
4032sigchld_handler (int signo)
4033{
4034 int old_errno = errno;
4035
4036 if (debug_threads)
4037 /* fprintf is not async-signal-safe, so call write directly. */
4038 write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);
4039
4040 if (target_is_async_p ())
4041 async_file_mark (); /* trigger a linux_wait */
4042
4043 errno = old_errno;
4044}
4045
/* Target hook: the Linux target always supports non-stop mode.  */

static int
linux_supports_non_stop (void)
{
  return 1;
}
4051
4052static int
4053linux_async (int enable)
4054{
4055 int previous = (linux_event_pipe[0] != -1);
4056
8336d594
PA
4057 if (debug_threads)
4058 fprintf (stderr, "linux_async (%d), previous=%d\n",
4059 enable, previous);
4060
bd99dc85
PA
4061 if (previous != enable)
4062 {
4063 sigset_t mask;
4064 sigemptyset (&mask);
4065 sigaddset (&mask, SIGCHLD);
4066
4067 sigprocmask (SIG_BLOCK, &mask, NULL);
4068
4069 if (enable)
4070 {
4071 if (pipe (linux_event_pipe) == -1)
4072 fatal ("creating event pipe failed.");
4073
4074 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4075 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4076
4077 /* Register the event loop handler. */
4078 add_file_handler (linux_event_pipe[0],
4079 handle_target_event, NULL);
4080
4081 /* Always trigger a linux_wait. */
4082 async_file_mark ();
4083 }
4084 else
4085 {
4086 delete_file_handler (linux_event_pipe[0]);
4087
4088 close (linux_event_pipe[0]);
4089 close (linux_event_pipe[1]);
4090 linux_event_pipe[0] = -1;
4091 linux_event_pipe[1] = -1;
4092 }
4093
4094 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4095 }
4096
4097 return previous;
4098}
4099
/* Target hook for switching non-stop mode on or off; simply keeps the
   async machinery in step with NONSTOP.  Always returns success.  */

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly. */
  linux_async (nonstop);
  return 0;
}
4107
cf8fd78b
PA
/* Target hook: the Linux target can debug multiple processes at
   once.  */

static int
linux_supports_multi_process (void)
{
  return 1;
}
4113
efcbbd14
UW
4114
4115/* Enumerate spufs IDs for process PID. */
4116static int
4117spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4118{
4119 int pos = 0;
4120 int written = 0;
4121 char path[128];
4122 DIR *dir;
4123 struct dirent *entry;
4124
4125 sprintf (path, "/proc/%ld/fd", pid);
4126 dir = opendir (path);
4127 if (!dir)
4128 return -1;
4129
4130 rewinddir (dir);
4131 while ((entry = readdir (dir)) != NULL)
4132 {
4133 struct stat st;
4134 struct statfs stfs;
4135 int fd;
4136
4137 fd = atoi (entry->d_name);
4138 if (!fd)
4139 continue;
4140
4141 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4142 if (stat (path, &st) != 0)
4143 continue;
4144 if (!S_ISDIR (st.st_mode))
4145 continue;
4146
4147 if (statfs (path, &stfs) != 0)
4148 continue;
4149 if (stfs.f_type != SPUFS_MAGIC)
4150 continue;
4151
4152 if (pos >= offset && pos + 4 <= offset + len)
4153 {
4154 *(unsigned int *)(buf + pos - offset) = fd;
4155 written += 4;
4156 }
4157 pos += 4;
4158 }
4159
4160 closedir (dir);
4161 return written;
4162}
4163
4164/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4165 object type, using the /proc file system. */
4166static int
4167linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4168 unsigned const char *writebuf,
4169 CORE_ADDR offset, int len)
4170{
4171 long pid = lwpid_of (get_thread_lwp (current_inferior));
4172 char buf[128];
4173 int fd = 0;
4174 int ret = 0;
4175
4176 if (!writebuf && !readbuf)
4177 return -1;
4178
4179 if (!*annex)
4180 {
4181 if (!readbuf)
4182 return -1;
4183 else
4184 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4185 }
4186
4187 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4188 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4189 if (fd <= 0)
4190 return -1;
4191
4192 if (offset != 0
4193 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4194 {
4195 close (fd);
4196 return 0;
4197 }
4198
4199 if (writebuf)
4200 ret = write (fd, writebuf, (size_t) len);
4201 else
4202 ret = read (fd, readbuf, (size_t) len);
4203
4204 close (fd);
4205 return ret;
4206}
4207
dc146f7c
VP
4208static int
4209linux_core_of_thread (ptid_t ptid)
4210{
4211 char filename[sizeof ("/proc//task//stat")
4212 + 2 * 20 /* decimal digits for 2 numbers, max 2^64 bit each */
4213 + 1];
4214 FILE *f;
4215 char *content = NULL;
4216 char *p;
4217 char *ts = 0;
4218 int content_read = 0;
4219 int i;
4220 int core;
4221
4222 sprintf (filename, "/proc/%d/task/%ld/stat",
4223 ptid_get_pid (ptid), ptid_get_lwp (ptid));
4224 f = fopen (filename, "r");
4225 if (!f)
4226 return -1;
4227
4228 for (;;)
4229 {
4230 int n;
4231 content = realloc (content, content_read + 1024);
4232 n = fread (content + content_read, 1, 1024, f);
4233 content_read += n;
4234 if (n < 1024)
4235 {
4236 content[content_read] = '\0';
4237 break;
4238 }
4239 }
4240
4241 p = strchr (content, '(');
4242 p = strchr (p, ')') + 2; /* skip ")" and a whitespace. */
4243
4244 p = strtok_r (p, " ", &ts);
4245 for (i = 0; i != 36; ++i)
4246 p = strtok_r (NULL, " ", &ts);
4247
4248 if (sscanf (p, "%d", &core) == 0)
4249 core = -1;
4250
4251 free (content);
4252 fclose (f);
4253
4254 return core;
4255}
4256
1570b33e
L
4257static void
4258linux_process_qsupported (const char *query)
4259{
4260 if (the_low_target.process_qsupported != NULL)
4261 the_low_target.process_qsupported (query);
4262}
4263
219f2f23
PA
4264static int
4265linux_supports_tracepoints (void)
4266{
4267 if (*the_low_target.supports_tracepoints == NULL)
4268 return 0;
4269
4270 return (*the_low_target.supports_tracepoints) ();
4271}
4272
4273static CORE_ADDR
4274linux_read_pc (struct regcache *regcache)
4275{
4276 if (the_low_target.get_pc == NULL)
4277 return 0;
4278
4279 return (*the_low_target.get_pc) (regcache);
4280}
4281
4282static void
4283linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
4284{
4285 gdb_assert (the_low_target.set_pc != NULL);
4286
4287 (*the_low_target.set_pc) (regcache, pc);
4288}
4289
8336d594
PA
4290static int
4291linux_thread_stopped (struct thread_info *thread)
4292{
4293 return get_thread_lwp (thread)->stopped;
4294}
4295
/* This exposes stop-all-threads functionality to other modules.  */

static void
linux_pause_all (void)
{
  stop_all_lwps ();
}
4303
ce3a066d
DJ
4304static struct target_ops linux_target_ops = {
4305 linux_create_inferior,
4306 linux_attach,
4307 linux_kill,
6ad8ae5c 4308 linux_detach,
8336d594 4309 linux_mourn,
444d6139 4310 linux_join,
ce3a066d
DJ
4311 linux_thread_alive,
4312 linux_resume,
4313 linux_wait,
4314 linux_fetch_registers,
4315 linux_store_registers,
4316 linux_read_memory,
4317 linux_write_memory,
2f2893d9 4318 linux_look_up_symbols,
ef57601b 4319 linux_request_interrupt,
aa691b87 4320 linux_read_auxv,
d993e290
PA
4321 linux_insert_point,
4322 linux_remove_point,
e013ee27
OF
4323 linux_stopped_by_watchpoint,
4324 linux_stopped_data_address,
42c81e2a 4325#if defined(__UCLIBC__) && defined(HAS_NOMMU)
52fb6437 4326 linux_read_offsets,
dae5f5cf
DJ
4327#else
4328 NULL,
4329#endif
4330#ifdef USE_THREAD_DB
4331 thread_db_get_tls_address,
4332#else
4333 NULL,
52fb6437 4334#endif
efcbbd14 4335 linux_qxfer_spu,
59a016f0 4336 hostio_last_error_from_errno,
07e059b5 4337 linux_qxfer_osdata,
4aa995e1 4338 linux_xfer_siginfo,
bd99dc85
PA
4339 linux_supports_non_stop,
4340 linux_async,
4341 linux_start_non_stop,
cdbfd419
PP
4342 linux_supports_multi_process,
4343#ifdef USE_THREAD_DB
dc146f7c 4344 thread_db_handle_monitor_command,
cdbfd419 4345#else
dc146f7c 4346 NULL,
cdbfd419 4347#endif
1570b33e 4348 linux_core_of_thread,
219f2f23
PA
4349 linux_process_qsupported,
4350 linux_supports_tracepoints,
4351 linux_read_pc,
8336d594
PA
4352 linux_write_pc,
4353 linux_thread_stopped,
711e434b
PM
4354 linux_pause_all,
4355 NULL, /* get_tib_address (Windows OS specific). */
ce3a066d
DJ
4356};
4357
0d62e5e8
DJ
/* Install the process-wide signal dispositions the Linux backend
   needs.  Declared with a proper (void) prototype; the old empty
   parameter list was a pre-C89 declaration style.  */

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is. */
#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
4367
da6d8c04
DJ
4368void
4369initialize_low (void)
4370{
bd99dc85
PA
4371 struct sigaction sigchld_action;
4372 memset (&sigchld_action, 0, sizeof (sigchld_action));
ce3a066d 4373 set_target_ops (&linux_target_ops);
611cb4a5
DJ
4374 set_breakpoint_data (the_low_target.breakpoint,
4375 the_low_target.breakpoint_len);
0d62e5e8 4376 linux_init_signals ();
24a09b5f 4377 linux_test_for_tracefork ();
52fa2412
UW
4378#ifdef HAVE_LINUX_REGSETS
4379 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
4380 ;
bca929d3 4381 disabled_regsets = xmalloc (num_regsets);
52fa2412 4382#endif
bd99dc85
PA
4383
4384 sigchld_action.sa_handler = sigchld_handler;
4385 sigemptyset (&sigchld_action.sa_mask);
4386 sigchld_action.sa_flags = SA_RESTART;
4387 sigaction (SIGCHLD, &sigchld_action, NULL);
da6d8c04 4388}