]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/gdbserver/linux-low.c
* server.c (handle_query) <qSupported>: Do two passes over the
[thirdparty/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
545587ee 2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
4c38e0a4 3 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
da6d8c04
DJ
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
a9762ec7 9 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
a9762ec7 18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
19
20#include "server.h"
58caa3dc 21#include "linux-low.h"
da6d8c04 22
58caa3dc 23#include <sys/wait.h>
da6d8c04
DJ
24#include <stdio.h>
25#include <sys/param.h>
da6d8c04 26#include <sys/ptrace.h>
da6d8c04
DJ
27#include <signal.h>
28#include <sys/ioctl.h>
29#include <fcntl.h>
d07c63e7 30#include <string.h>
0a30fbc4
DJ
31#include <stdlib.h>
32#include <unistd.h>
fa6a77dc 33#include <errno.h>
fd500816 34#include <sys/syscall.h>
f9387fc3 35#include <sched.h>
07e059b5
VP
36#include <ctype.h>
37#include <pwd.h>
38#include <sys/types.h>
39#include <dirent.h>
efcbbd14
UW
40#include <sys/stat.h>
41#include <sys/vfs.h>
1570b33e 42#include <sys/uio.h>
957f3f49
DE
43#ifndef ELFMAG0
44/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
45 then ELFMAG0 will have been defined. If it didn't get included by
46 gdb_proc_service.h then including it will likely introduce a duplicate
47 definition of elf_fpregset_t. */
48#include <elf.h>
49#endif
efcbbd14
UW
50
51#ifndef SPUFS_MAGIC
52#define SPUFS_MAGIC 0x23c9b64e
53#endif
da6d8c04 54
32ca6d61
DJ
55#ifndef PTRACE_GETSIGINFO
56# define PTRACE_GETSIGINFO 0x4202
57# define PTRACE_SETSIGINFO 0x4203
58#endif
59
fd462a61
DJ
60#ifndef O_LARGEFILE
61#define O_LARGEFILE 0
62#endif
63
24a09b5f
DJ
64/* If the system headers did not provide the constants, hard-code the normal
65 values. */
66#ifndef PTRACE_EVENT_FORK
67
68#define PTRACE_SETOPTIONS 0x4200
69#define PTRACE_GETEVENTMSG 0x4201
70
71/* options set using PTRACE_SETOPTIONS */
72#define PTRACE_O_TRACESYSGOOD 0x00000001
73#define PTRACE_O_TRACEFORK 0x00000002
74#define PTRACE_O_TRACEVFORK 0x00000004
75#define PTRACE_O_TRACECLONE 0x00000008
76#define PTRACE_O_TRACEEXEC 0x00000010
77#define PTRACE_O_TRACEVFORKDONE 0x00000020
78#define PTRACE_O_TRACEEXIT 0x00000040
79
80/* Wait extended result codes for the above trace options. */
81#define PTRACE_EVENT_FORK 1
82#define PTRACE_EVENT_VFORK 2
83#define PTRACE_EVENT_CLONE 3
84#define PTRACE_EVENT_EXEC 4
85#define PTRACE_EVENT_VFORK_DONE 5
86#define PTRACE_EVENT_EXIT 6
87
88#endif /* PTRACE_EVENT_FORK */
89
90/* We can't always assume that this flag is available, but all systems
91 with the ptrace event handlers also have __WALL, so it's safe to use
92 in some contexts. */
93#ifndef __WALL
94#define __WALL 0x40000000 /* Wait for any child. */
95#endif
96
ec8ebe72
DE
97#ifndef W_STOPCODE
98#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
99#endif
100
42c81e2a
DJ
101#ifdef __UCLIBC__
102#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
103#define HAS_NOMMU
104#endif
105#endif
106
24a09b5f
DJ
107/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
108 representation of the thread ID.
611cb4a5 109
54a0b537 110 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
95954743
PA
111 the same as the LWP ID.
112
113 ``all_processes'' is keyed by the "overall process ID", which
114 GNU/Linux calls tgid, "thread group ID". */
0d62e5e8 115
54a0b537 116struct inferior_list all_lwps;
0d62e5e8 117
24a09b5f
DJ
118/* A list of all unknown processes which receive stop signals. Some other
119 process will presumably claim each of these as forked children
120 momentarily. */
121
122struct inferior_list stopped_pids;
123
0d62e5e8
DJ
124/* FIXME this is a bit of a hack, and could be removed. */
125int stopping_threads;
126
127/* FIXME make into a target method? */
24a09b5f 128int using_threads = 1;
24a09b5f 129
95954743
PA
130/* This flag is true iff we've just created or attached to our first
131 inferior but it has not stopped yet. As soon as it does, we need
132 to call the low target's arch_setup callback. Doing this only on
133 the first inferior avoids reinializing the architecture on every
134 inferior, and avoids messing with the register caches of the
135 already running inferiors. NOTE: this assumes all inferiors under
136 control of gdbserver have the same architecture. */
d61ddec4
UW
137static int new_inferior;
138
2acc282a 139static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 140 int step, int signal, siginfo_t *info);
2bd7c093 141static void linux_resume (struct thread_resume *resume_info, size_t n);
7984d532
PA
142static void stop_all_lwps (int suspend, struct lwp_info *except);
143static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
95954743 144static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
95954743 145static void *add_lwp (ptid_t ptid);
c35fafde 146static int linux_stopped_by_watchpoint (void);
95954743 147static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
dc146f7c 148static int linux_core_of_thread (ptid_t ptid);
d50171e4 149static void proceed_all_lwps (void);
d50171e4
PA
150static int finish_step_over (struct lwp_info *lwp);
151static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
152static int kill_lwp (unsigned long lwpid, int signo);
1e7fc18c 153static void linux_enable_event_reporting (int pid);
d50171e4
PA
154
155/* True if the low target can hardware single-step. Such targets
156 don't need a BREAKPOINT_REINSERT_ADDR callback. */
157
158static int
159can_hardware_single_step (void)
160{
161 return (the_low_target.breakpoint_reinsert_addr == NULL);
162}
163
164/* True if the low target supports memory breakpoints. If so, we'll
165 have a GET_PC implementation. */
166
167static int
168supports_breakpoints (void)
169{
170 return (the_low_target.get_pc != NULL);
171}
0d62e5e8
DJ
172
173struct pending_signals
174{
175 int signal;
32ca6d61 176 siginfo_t info;
0d62e5e8
DJ
177 struct pending_signals *prev;
178};
611cb4a5 179
14ce3065
DE
180#define PTRACE_ARG3_TYPE void *
181#define PTRACE_ARG4_TYPE void *
c6ecbae5 182#define PTRACE_XFER_TYPE long
da6d8c04 183
58caa3dc 184#ifdef HAVE_LINUX_REGSETS
52fa2412
UW
185static char *disabled_regsets;
186static int num_regsets;
58caa3dc
DJ
187#endif
188
bd99dc85
PA
189/* The read/write ends of the pipe registered as waitable file in the
190 event loop. */
191static int linux_event_pipe[2] = { -1, -1 };
192
193/* True if we're currently in async mode. */
194#define target_is_async_p() (linux_event_pipe[0] != -1)
195
02fc4de7 196static void send_sigstop (struct lwp_info *lwp);
bd99dc85
PA
197static void wait_for_sigstop (struct inferior_list_entry *entry);
198
d0722149
DE
/* Accepts an integer PID; Returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd, caller must free.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);
  /* Bound the read to MAXPATHLEN - 1: readlink does not NUL-terminate,
     so leave room for the terminator provided by the memset above.  */
  if (readlink (name1, name2, MAXPATHLEN - 1) > 0)
    {
      free (name1);
      return name2;
    }
  else
    {
      /* Fall back to the /proc path itself if the link can't be read.  */
      free (name2);
      return name1;
    }
}
224
/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  return (header->e_ident[EI_MAG0] == ELFMAG0
	  && header->e_ident[EI_MAG1] == ELFMAG1
	  && header->e_ident[EI_MAG2] == ELFMAG2
	  && header->e_ident[EI_MAG3] == ELFMAG3
	  && header->e_ident[EI_CLASS] == ELFCLASS64);
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  /* A short read means the file can't be a 64-bit ELF object.  */
  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header);
}
260
bd99dc85
PA
261static void
262delete_lwp (struct lwp_info *lwp)
263{
264 remove_thread (get_lwp_thread (lwp));
265 remove_inferior (&all_lwps, &lwp->head);
aa5ca48f 266 free (lwp->arch_private);
bd99dc85
PA
267 free (lwp);
268}
269
95954743
PA
270/* Add a process to the common process list, and set its private
271 data. */
272
273static struct process_info *
274linux_add_process (int pid, int attached)
275{
276 struct process_info *proc;
277
278 /* Is this the first process? If so, then set the arch. */
279 if (all_processes.head == NULL)
280 new_inferior = 1;
281
282 proc = add_process (pid, attached);
283 proc->private = xcalloc (1, sizeof (*proc->private));
284
aa5ca48f
DE
285 if (the_low_target.new_process != NULL)
286 proc->private->arch_private = the_low_target.new_process ();
287
95954743
PA
288 return proc;
289}
290
07d4f67e
DE
291/* Wrapper function for waitpid which handles EINTR, and emulates
292 __WALL for systems where that is not available. */
293
294static int
295my_waitpid (int pid, int *status, int flags)
296{
297 int ret, out_errno;
298
299 if (debug_threads)
300 fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);
301
302 if (flags & __WALL)
303 {
304 sigset_t block_mask, org_mask, wake_mask;
305 int wnohang;
306
307 wnohang = (flags & WNOHANG) != 0;
308 flags &= ~(__WALL | __WCLONE);
309 flags |= WNOHANG;
310
311 /* Block all signals while here. This avoids knowing about
312 LinuxThread's signals. */
313 sigfillset (&block_mask);
314 sigprocmask (SIG_BLOCK, &block_mask, &org_mask);
315
316 /* ... except during the sigsuspend below. */
317 sigemptyset (&wake_mask);
318
319 while (1)
320 {
321 /* Since all signals are blocked, there's no need to check
322 for EINTR here. */
323 ret = waitpid (pid, status, flags);
324 out_errno = errno;
325
326 if (ret == -1 && out_errno != ECHILD)
327 break;
328 else if (ret > 0)
329 break;
330
331 if (flags & __WCLONE)
332 {
333 /* We've tried both flavors now. If WNOHANG is set,
334 there's nothing else to do, just bail out. */
335 if (wnohang)
336 break;
337
338 if (debug_threads)
339 fprintf (stderr, "blocking\n");
340
341 /* Block waiting for signals. */
342 sigsuspend (&wake_mask);
343 }
344
345 flags ^= __WCLONE;
346 }
347
348 sigprocmask (SIG_SETMASK, &org_mask, NULL);
349 }
350 else
351 {
352 do
353 ret = waitpid (pid, status, flags);
354 while (ret == -1 && errno == EINTR);
355 out_errno = errno;
356 }
357
358 if (debug_threads)
359 fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
360 pid, flags, status ? *status : -1, ret);
361
362 errno = out_errno;
363 return ret;
364}
365
bd99dc85
PA
366/* Handle a GNU/Linux extended wait response. If we see a clone
367 event, we need to add the new LWP to our list (and not report the
368 trap to higher layers). */
0d62e5e8 369
24a09b5f 370static void
54a0b537 371handle_extended_wait (struct lwp_info *event_child, int wstat)
24a09b5f
DJ
372{
373 int event = wstat >> 16;
54a0b537 374 struct lwp_info *new_lwp;
24a09b5f
DJ
375
376 if (event == PTRACE_EVENT_CLONE)
377 {
95954743 378 ptid_t ptid;
24a09b5f 379 unsigned long new_pid;
836acd6d 380 int ret, status = W_STOPCODE (SIGSTOP);
24a09b5f 381
bd99dc85 382 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);
24a09b5f
DJ
383
384 /* If we haven't already seen the new PID stop, wait for it now. */
385 if (! pull_pid_from_list (&stopped_pids, new_pid))
386 {
387 /* The new child has a pending SIGSTOP. We can't affect it until it
388 hits the SIGSTOP, but we're already attached. */
389
97438e3f 390 ret = my_waitpid (new_pid, &status, __WALL);
24a09b5f
DJ
391
392 if (ret == -1)
393 perror_with_name ("waiting for new child");
394 else if (ret != new_pid)
395 warning ("wait returned unexpected PID %d", ret);
da5898ce 396 else if (!WIFSTOPPED (status))
24a09b5f
DJ
397 warning ("wait returned unexpected status 0x%x", status);
398 }
399
1e7fc18c 400 linux_enable_event_reporting (new_pid);
24a09b5f 401
95954743
PA
402 ptid = ptid_build (pid_of (event_child), new_pid, 0);
403 new_lwp = (struct lwp_info *) add_lwp (ptid);
404 add_thread (ptid, new_lwp);
24a09b5f 405
e27d73f6
DE
406 /* Either we're going to immediately resume the new thread
407 or leave it stopped. linux_resume_one_lwp is a nop if it
408 thinks the thread is currently running, so set this first
409 before calling linux_resume_one_lwp. */
410 new_lwp->stopped = 1;
411
da5898ce
DJ
412 /* Normally we will get the pending SIGSTOP. But in some cases
413 we might get another signal delivered to the group first.
f21cc1a2 414 If we do get another signal, be sure not to lose it. */
da5898ce
DJ
415 if (WSTOPSIG (status) == SIGSTOP)
416 {
d50171e4
PA
417 if (stopping_threads)
418 new_lwp->stop_pc = get_stop_pc (new_lwp);
419 else
e27d73f6 420 linux_resume_one_lwp (new_lwp, 0, 0, NULL);
da5898ce 421 }
24a09b5f 422 else
da5898ce 423 {
54a0b537 424 new_lwp->stop_expected = 1;
d50171e4 425
da5898ce
DJ
426 if (stopping_threads)
427 {
d50171e4 428 new_lwp->stop_pc = get_stop_pc (new_lwp);
54a0b537
PA
429 new_lwp->status_pending_p = 1;
430 new_lwp->status_pending = status;
da5898ce
DJ
431 }
432 else
433 /* Pass the signal on. This is what GDB does - except
434 shouldn't we really report it instead? */
e27d73f6 435 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
da5898ce 436 }
24a09b5f
DJ
437
438 /* Always resume the current thread. If we are stopping
439 threads, it will have a pending SIGSTOP; we may as well
440 collect it now. */
2acc282a 441 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
24a09b5f
DJ
442 }
443}
444
d50171e4
PA
445/* Return the PC as read from the regcache of LWP, without any
446 adjustment. */
447
448static CORE_ADDR
449get_pc (struct lwp_info *lwp)
450{
451 struct thread_info *saved_inferior;
452 struct regcache *regcache;
453 CORE_ADDR pc;
454
455 if (the_low_target.get_pc == NULL)
456 return 0;
457
458 saved_inferior = current_inferior;
459 current_inferior = get_lwp_thread (lwp);
460
461 regcache = get_thread_regcache (current_inferior, 1);
462 pc = (*the_low_target.get_pc) (regcache);
463
464 if (debug_threads)
465 fprintf (stderr, "pc is 0x%lx\n", (long) pc);
466
467 current_inferior = saved_inferior;
468 return pc;
469}
470
471/* This function should only be called if LWP got a SIGTRAP.
0d62e5e8
DJ
472 The SIGTRAP could mean several things.
473
474 On i386, where decr_pc_after_break is non-zero:
475 If we were single-stepping this process using PTRACE_SINGLESTEP,
476 we will get only the one SIGTRAP (even if the instruction we
477 stepped over was a breakpoint). The value of $eip will be the
478 next instruction.
479 If we continue the process using PTRACE_CONT, we will get a
480 SIGTRAP when we hit a breakpoint. The value of $eip will be
481 the instruction after the breakpoint (i.e. needs to be
482 decremented). If we report the SIGTRAP to GDB, we must also
483 report the undecremented PC. If we cancel the SIGTRAP, we
484 must resume at the decremented PC.
485
486 (Presumably, not yet tested) On a non-decr_pc_after_break machine
487 with hardware or kernel single-step:
488 If we single-step over a breakpoint instruction, our PC will
489 point at the following instruction. If we continue and hit a
490 breakpoint instruction, our PC will point at the breakpoint
491 instruction. */
492
493static CORE_ADDR
d50171e4 494get_stop_pc (struct lwp_info *lwp)
0d62e5e8 495{
d50171e4
PA
496 CORE_ADDR stop_pc;
497
498 if (the_low_target.get_pc == NULL)
499 return 0;
0d62e5e8 500
d50171e4
PA
501 stop_pc = get_pc (lwp);
502
bdabb078
PA
503 if (WSTOPSIG (lwp->last_status) == SIGTRAP
504 && !lwp->stepping
505 && !lwp->stopped_by_watchpoint
506 && lwp->last_status >> 16 == 0)
47c0c975
DE
507 stop_pc -= the_low_target.decr_pc_after_break;
508
509 if (debug_threads)
510 fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
511
512 return stop_pc;
0d62e5e8 513}
ce3a066d 514
0d62e5e8 515static void *
95954743 516add_lwp (ptid_t ptid)
611cb4a5 517{
54a0b537 518 struct lwp_info *lwp;
0d62e5e8 519
54a0b537
PA
520 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
521 memset (lwp, 0, sizeof (*lwp));
0d62e5e8 522
95954743 523 lwp->head.id = ptid;
0d62e5e8 524
aa5ca48f
DE
525 if (the_low_target.new_thread != NULL)
526 lwp->arch_private = the_low_target.new_thread ();
527
54a0b537 528 add_inferior_to_list (&all_lwps, &lwp->head);
0d62e5e8 529
54a0b537 530 return lwp;
0d62e5e8 531}
611cb4a5 532
da6d8c04
DJ
533/* Start an inferior process and returns its pid.
534 ALLARGS is a vector of program-name and args. */
535
ce3a066d
DJ
536static int
537linux_create_inferior (char *program, char **allargs)
da6d8c04 538{
a6dbe5df 539 struct lwp_info *new_lwp;
da6d8c04 540 int pid;
95954743 541 ptid_t ptid;
da6d8c04 542
42c81e2a 543#if defined(__UCLIBC__) && defined(HAS_NOMMU)
52fb6437
NS
544 pid = vfork ();
545#else
da6d8c04 546 pid = fork ();
52fb6437 547#endif
da6d8c04
DJ
548 if (pid < 0)
549 perror_with_name ("fork");
550
551 if (pid == 0)
552 {
553 ptrace (PTRACE_TRACEME, 0, 0, 0);
554
60c3d7b0 555#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
254787d4 556 signal (__SIGRTMIN + 1, SIG_DFL);
60c3d7b0 557#endif
0d62e5e8 558
a9fa9f7d
DJ
559 setpgid (0, 0);
560
2b876972
DJ
561 execv (program, allargs);
562 if (errno == ENOENT)
563 execvp (program, allargs);
da6d8c04
DJ
564
565 fprintf (stderr, "Cannot exec %s: %s.\n", program,
d07c63e7 566 strerror (errno));
da6d8c04
DJ
567 fflush (stderr);
568 _exit (0177);
569 }
570
95954743
PA
571 linux_add_process (pid, 0);
572
573 ptid = ptid_build (pid, pid, 0);
574 new_lwp = add_lwp (ptid);
575 add_thread (ptid, new_lwp);
a6dbe5df 576 new_lwp->must_set_ptrace_flags = 1;
611cb4a5 577
a9fa9f7d 578 return pid;
da6d8c04
DJ
579}
580
581/* Attach to an inferior process. */
582
95954743
PA
583static void
584linux_attach_lwp_1 (unsigned long lwpid, int initial)
da6d8c04 585{
95954743 586 ptid_t ptid;
54a0b537 587 struct lwp_info *new_lwp;
611cb4a5 588
95954743 589 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
da6d8c04 590 {
95954743 591 if (!initial)
2d717e4f
DJ
592 {
593 /* If we fail to attach to an LWP, just warn. */
95954743 594 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
2d717e4f
DJ
595 strerror (errno), errno);
596 fflush (stderr);
597 return;
598 }
599 else
600 /* If we fail to attach to a process, report an error. */
95954743 601 error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
43d5792c 602 strerror (errno), errno);
da6d8c04
DJ
603 }
604
95954743
PA
605 if (initial)
606 /* NOTE/FIXME: This lwp might have not been the tgid. */
607 ptid = ptid_build (lwpid, lwpid, 0);
608 else
609 {
610 /* Note that extracting the pid from the current inferior is
611 safe, since we're always called in the context of the same
612 process as this new thread. */
613 int pid = pid_of (get_thread_lwp (current_inferior));
614 ptid = ptid_build (pid, lwpid, 0);
615 }
24a09b5f 616
95954743
PA
617 new_lwp = (struct lwp_info *) add_lwp (ptid);
618 add_thread (ptid, new_lwp);
0d62e5e8 619
a6dbe5df
PA
620 /* We need to wait for SIGSTOP before being able to make the next
621 ptrace call on this LWP. */
622 new_lwp->must_set_ptrace_flags = 1;
623
0d62e5e8 624 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
0e21c1ec
DE
625 brings it to a halt.
626
627 There are several cases to consider here:
628
629 1) gdbserver has already attached to the process and is being notified
1b3f6016 630 of a new thread that is being created.
d50171e4
PA
631 In this case we should ignore that SIGSTOP and resume the
632 process. This is handled below by setting stop_expected = 1,
8336d594 633 and the fact that add_thread sets last_resume_kind ==
d50171e4 634 resume_continue.
0e21c1ec
DE
635
636 2) This is the first thread (the process thread), and we're attaching
1b3f6016
PA
637 to it via attach_inferior.
638 In this case we want the process thread to stop.
d50171e4
PA
639 This is handled by having linux_attach set last_resume_kind ==
640 resume_stop after we return.
1b3f6016
PA
641 ??? If the process already has several threads we leave the other
642 threads running.
0e21c1ec
DE
643
644 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1b3f6016
PA
645 existing threads.
646 In this case we want the thread to stop.
647 FIXME: This case is currently not properly handled.
648 We should wait for the SIGSTOP but don't. Things work apparently
649 because enough time passes between when we ptrace (ATTACH) and when
650 gdb makes the next ptrace call on the thread.
0d62e5e8
DJ
651
652 On the other hand, if we are currently trying to stop all threads, we
653 should treat the new thread as if we had sent it a SIGSTOP. This works
54a0b537 654 because we are guaranteed that the add_lwp call above added us to the
0e21c1ec
DE
655 end of the list, and so the new thread has not yet reached
656 wait_for_sigstop (but will). */
d50171e4 657 new_lwp->stop_expected = 1;
0d62e5e8
DJ
658}
659
95954743
PA
/* Public entry point: attach to LWPID as a non-initial (extra) LWP.  */
void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}
665
0d62e5e8 666int
a1928bad 667linux_attach (unsigned long pid)
0d62e5e8 668{
95954743 669 linux_attach_lwp_1 (pid, 1);
95954743 670 linux_add_process (pid, 1);
0d62e5e8 671
bd99dc85
PA
672 if (!non_stop)
673 {
8336d594
PA
674 struct thread_info *thread;
675
676 /* Don't ignore the initial SIGSTOP if we just attached to this
677 process. It will be collected by wait shortly. */
678 thread = find_thread_ptid (ptid_build (pid, pid, 0));
679 thread->last_resume_kind = resume_stop;
bd99dc85 680 }
0d62e5e8 681
95954743
PA
682 return 0;
683}
684
685struct counter
686{
687 int pid;
688 int count;
689};
690
691static int
692second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
693{
694 struct counter *counter = args;
695
696 if (ptid_get_pid (entry->id) == counter->pid)
697 {
698 if (++counter->count > 1)
699 return 1;
700 }
d61ddec4 701
da6d8c04
DJ
702 return 0;
703}
704
95954743
PA
705static int
706last_thread_of_process_p (struct thread_info *thread)
707{
708 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
709 int pid = ptid_get_pid (ptid);
710 struct counter counter = { pid , 0 };
da6d8c04 711
95954743
PA
712 return (find_inferior (&all_threads,
713 second_thread_of_pid_p, &counter) == NULL);
714}
715
716/* Kill the inferior lwp. */
717
718static int
719linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
da6d8c04 720{
0d62e5e8 721 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 722 struct lwp_info *lwp = get_thread_lwp (thread);
0d62e5e8 723 int wstat;
95954743
PA
724 int pid = * (int *) args;
725
726 if (ptid_get_pid (entry->id) != pid)
727 return 0;
0d62e5e8 728
fd500816
DJ
729 /* We avoid killing the first thread here, because of a Linux kernel (at
730 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
731 the children get a chance to be reaped, it will remain a zombie
732 forever. */
95954743 733
12b42a12 734 if (lwpid_of (lwp) == pid)
95954743
PA
735 {
736 if (debug_threads)
737 fprintf (stderr, "lkop: is last of process %s\n",
738 target_pid_to_str (entry->id));
739 return 0;
740 }
fd500816 741
0d62e5e8
DJ
742 do
743 {
bd99dc85 744 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
0d62e5e8
DJ
745
746 /* Make sure it died. The loop is most likely unnecessary. */
95954743 747 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
bd99dc85 748 } while (pid > 0 && WIFSTOPPED (wstat));
95954743
PA
749
750 return 0;
da6d8c04
DJ
751}
752
95954743
PA
753static int
754linux_kill (int pid)
0d62e5e8 755{
95954743 756 struct process_info *process;
54a0b537 757 struct lwp_info *lwp;
95954743 758 struct thread_info *thread;
fd500816 759 int wstat;
95954743 760 int lwpid;
fd500816 761
95954743
PA
762 process = find_process_pid (pid);
763 if (process == NULL)
764 return -1;
9d606399 765
f9e39928
PA
766 /* If we're killing a running inferior, make sure it is stopped
767 first, as PTRACE_KILL will not work otherwise. */
7984d532 768 stop_all_lwps (0, NULL);
f9e39928 769
95954743 770 find_inferior (&all_threads, linux_kill_one_lwp, &pid);
fd500816 771
54a0b537 772 /* See the comment in linux_kill_one_lwp. We did not kill the first
fd500816 773 thread in the list, so do so now. */
95954743
PA
774 lwp = find_lwp_pid (pid_to_ptid (pid));
775 thread = get_lwp_thread (lwp);
bd99dc85
PA
776
777 if (debug_threads)
95954743
PA
778 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
779 lwpid_of (lwp), pid);
bd99dc85 780
fd500816
DJ
781 do
782 {
bd99dc85 783 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
fd500816
DJ
784
785 /* Make sure it died. The loop is most likely unnecessary. */
95954743
PA
786 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
787 } while (lwpid > 0 && WIFSTOPPED (wstat));
2d717e4f 788
8336d594 789 the_target->mourn (process);
f9e39928
PA
790
791 /* Since we presently can only stop all lwps of all processes, we
792 need to unstop lwps of other processes. */
7984d532 793 unstop_all_lwps (0, NULL);
95954743 794 return 0;
0d62e5e8
DJ
795}
796
95954743
PA
797static int
798linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
6ad8ae5c
DJ
799{
800 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 801 struct lwp_info *lwp = get_thread_lwp (thread);
95954743
PA
802 int pid = * (int *) args;
803
804 if (ptid_get_pid (entry->id) != pid)
805 return 0;
6ad8ae5c 806
ae13219e
DJ
807 /* If this process is stopped but is expecting a SIGSTOP, then make
808 sure we take care of that now. This isn't absolutely guaranteed
809 to collect the SIGSTOP, but is fairly likely to. */
54a0b537 810 if (lwp->stop_expected)
ae13219e 811 {
bd99dc85 812 int wstat;
ae13219e 813 /* Clear stop_expected, so that the SIGSTOP will be reported. */
54a0b537 814 lwp->stop_expected = 0;
f9e39928 815 linux_resume_one_lwp (lwp, 0, 0, NULL);
95954743 816 linux_wait_for_event (lwp->head.id, &wstat, __WALL);
ae13219e
DJ
817 }
818
819 /* Flush any pending changes to the process's registers. */
820 regcache_invalidate_one ((struct inferior_list_entry *)
54a0b537 821 get_lwp_thread (lwp));
ae13219e
DJ
822
823 /* Finally, let it resume. */
bd99dc85
PA
824 ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);
825
826 delete_lwp (lwp);
95954743 827 return 0;
6ad8ae5c
DJ
828}
829
95954743
PA
830static int
831linux_detach (int pid)
832{
833 struct process_info *process;
834
835 process = find_process_pid (pid);
836 if (process == NULL)
837 return -1;
838
f9e39928
PA
839 /* Stop all threads before detaching. First, ptrace requires that
840 the thread is stopped to sucessfully detach. Second, thread_db
841 may need to uninstall thread event breakpoints from memory, which
842 only works with a stopped process anyway. */
7984d532 843 stop_all_lwps (0, NULL);
f9e39928 844
ca5c370d 845#ifdef USE_THREAD_DB
8336d594 846 thread_db_detach (process);
ca5c370d
PA
847#endif
848
95954743 849 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
8336d594
PA
850
851 the_target->mourn (process);
f9e39928
PA
852
853 /* Since we presently can only stop all lwps of all processes, we
854 need to unstop lwps of other processes. */
7984d532 855 unstop_all_lwps (0, NULL);
f9e39928
PA
856 return 0;
857}
858
/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}
872
8336d594
PA
873static void
874linux_mourn (struct process_info *process)
875{
876 struct process_info_private *priv;
877
878#ifdef USE_THREAD_DB
879 thread_db_mourn (process);
880#endif
881
f9e39928
PA
882 find_inferior (&all_lwps, delete_lwp_callback, process);
883
8336d594
PA
884 /* Freeing all private data. */
885 priv = process->private;
886 free (priv->arch_private);
887 free (priv);
888 process->private = NULL;
505106cd
PA
889
890 remove_process (process);
8336d594
PA
891}
892
444d6139 893static void
95954743 894linux_join (int pid)
444d6139 895{
444d6139 896 int status, ret;
95954743 897 struct process_info *process;
bd99dc85 898
95954743
PA
899 process = find_process_pid (pid);
900 if (process == NULL)
901 return;
444d6139
PA
902
903 do {
95954743 904 ret = my_waitpid (pid, &status, 0);
444d6139
PA
905 if (WIFEXITED (status) || WIFSIGNALED (status))
906 break;
907 } while (ret != -1 || errno != ECHILD);
908}
909
6ad8ae5c 910/* Return nonzero if the given thread is still alive. */
0d62e5e8 911static int
95954743 912linux_thread_alive (ptid_t ptid)
0d62e5e8 913{
95954743
PA
914 struct lwp_info *lwp = find_lwp_pid (ptid);
915
916 /* We assume we always know if a thread exits. If a whole process
917 exited but we still haven't been able to report it to GDB, we'll
918 hold on to the last lwp of the dead process. */
919 if (lwp != NULL)
920 return !lwp->dead;
0d62e5e8
DJ
921 else
922 return 0;
923}
924
6bf5e0ba 925/* Return 1 if this lwp has an interesting status pending. */
611cb4a5 926static int
d50171e4 927status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
0d62e5e8 928{
54a0b537 929 struct lwp_info *lwp = (struct lwp_info *) entry;
95954743 930 ptid_t ptid = * (ptid_t *) arg;
7984d532 931 struct thread_info *thread;
95954743
PA
932
933 /* Check if we're only interested in events from a specific process
934 or its lwps. */
935 if (!ptid_equal (minus_one_ptid, ptid)
936 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
937 return 0;
0d62e5e8 938
d50171e4
PA
939 thread = get_lwp_thread (lwp);
940
941 /* If we got a `vCont;t', but we haven't reported a stop yet, do
942 report any status pending the LWP may have. */
8336d594 943 if (thread->last_resume_kind == resume_stop
7984d532 944 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
d50171e4 945 return 0;
0d62e5e8 946
d50171e4 947 return lwp->status_pending_p;
0d62e5e8
DJ
948}
949
95954743
PA
950static int
951same_lwp (struct inferior_list_entry *entry, void *data)
952{
953 ptid_t ptid = *(ptid_t *) data;
954 int lwp;
955
956 if (ptid_get_lwp (ptid) != 0)
957 lwp = ptid_get_lwp (ptid);
958 else
959 lwp = ptid_get_pid (ptid);
960
961 if (ptid_get_lwp (entry->id) == lwp)
962 return 1;
963
964 return 0;
965}
966
967struct lwp_info *
968find_lwp_pid (ptid_t ptid)
969{
970 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
971}
972
/* Wait (via waitpid) for an event from an LWP matching PTID, or from
   any child if PTID is minus_one_ptid.  Store the raw wait status in
   *WSTATP.  OPTIONS is passed on to waitpid (with __WALL forced on).
   Return the lwp_info of the child that reported the event, or NULL
   if OPTIONS contains WNOHANG and no event was available.  On return,
   the child is marked stopped, its last_status and stop_pc are
   updated, and any triggered watchpoint data is cached.  */

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  /* __WALL waits for both cloned (thread) and non-cloned children.  */
  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  /* NOTE(review): signals 32/33 are presumably the thread library's
     internal realtime signals (__SIGRTMIN and __SIGRTMIN + 1, see the
     USE_THREAD_DB check in linux_wait_1); they are filtered here only
     to reduce debug-output noise — confirm.  */
  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
	  || (WSTOPSIG (*wstatp) != 32
	      && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
	{
	  child->stopped_by_watchpoint = 0;
	}
      else
	{
	  struct thread_info *saved_inferior;

	  /* The low target's hooks read from the current inferior, so
	     temporarily switch to CHILD's thread.  */
	  saved_inferior = current_inferior;
	  current_inferior = get_lwp_thread (child);

	  child->stopped_by_watchpoint
	    = the_low_target.stopped_by_watchpoint ();

	  if (child->stopped_by_watchpoint)
	    {
	      if (the_low_target.stopped_data_address != NULL)
		child->stopped_data_address
		  = the_low_target.stopped_data_address ();
	      else
		child->stopped_data_address = 0;
	    }

	  current_inferior = saved_inferior;
	}
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}
611cb4a5 1104
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  /* Undo the temporary suspension taken above.  */
  lwp->suspended--;

  /* Nothing in the collect actions should have left this LWP
     suspended.  */
  gdb_assert (lwp->suspended == 0);

  if (tpoint_related_event)
    {
      if (debug_threads)
	fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}
1147
d50171e4
PA
1148/* Arrange for a breakpoint to be hit again later. We don't keep the
1149 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1150 will handle the current event, eventually we will resume this LWP,
1151 and this breakpoint will trap again. */
1152
1153static int
1154cancel_breakpoint (struct lwp_info *lwp)
1155{
1156 struct thread_info *saved_inferior;
d50171e4
PA
1157
1158 /* There's nothing to do if we don't support breakpoints. */
1159 if (!supports_breakpoints ())
1160 return 0;
1161
d50171e4
PA
1162 /* breakpoint_at reads from current inferior. */
1163 saved_inferior = current_inferior;
1164 current_inferior = get_lwp_thread (lwp);
1165
1166 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1167 {
1168 if (debug_threads)
1169 fprintf (stderr,
1170 "CB: Push back breakpoint for %s\n",
fc7238bb 1171 target_pid_to_str (ptid_of (lwp)));
d50171e4
PA
1172
1173 /* Back up the PC if necessary. */
1174 if (the_low_target.decr_pc_after_break)
1175 {
1176 struct regcache *regcache
fc7238bb 1177 = get_thread_regcache (current_inferior, 1);
d50171e4
PA
1178 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1179 }
1180
1181 current_inferior = saved_inferior;
1182 return 1;
1183 }
1184 else
1185 {
1186 if (debug_threads)
1187 fprintf (stderr,
1188 "CB: No breakpoint found at %s for [%s]\n",
1189 paddress (lwp->stop_pc),
fc7238bb 1190 target_pid_to_str (ptid_of (lwp)));
d50171e4
PA
1191 }
1192
1193 current_inferior = saved_inferior;
1194 return 0;
1195}
1196
/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  It is null_ptid when no step-over is in progress
   (see the ptid_equal check in linux_wait_1).  */
ptid_t step_over_bkpt;
1200
bd99dc85
PA
1201/* Wait for an event from child PID. If PID is -1, wait for any
1202 child. Store the stop status through the status pointer WSTAT.
1203 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1204 event was found and OPTIONS contains WNOHANG. Return the PID of
1205 the stopped child otherwise. */
1206
0d62e5e8 1207static int
95954743 1208linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
0d62e5e8 1209{
d50171e4
PA
1210 struct lwp_info *event_child, *requested_child;
1211
d50171e4
PA
1212 event_child = NULL;
1213 requested_child = NULL;
0d62e5e8 1214
95954743 1215 /* Check for a lwp with a pending status. */
bd99dc85 1216
95954743
PA
1217 if (ptid_equal (ptid, minus_one_ptid)
1218 || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
0d62e5e8 1219 {
54a0b537 1220 event_child = (struct lwp_info *)
d50171e4 1221 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
0d62e5e8 1222 if (debug_threads && event_child)
bd99dc85 1223 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
0d62e5e8
DJ
1224 }
1225 else
1226 {
95954743 1227 requested_child = find_lwp_pid (ptid);
d50171e4
PA
1228
1229 if (requested_child->status_pending_p)
bd99dc85 1230 event_child = requested_child;
0d62e5e8 1231 }
611cb4a5 1232
0d62e5e8
DJ
1233 if (event_child != NULL)
1234 {
bd99dc85
PA
1235 if (debug_threads)
1236 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1237 lwpid_of (event_child), event_child->status_pending);
1238 *wstat = event_child->status_pending;
1239 event_child->status_pending_p = 0;
1240 event_child->status_pending = 0;
1241 current_inferior = get_lwp_thread (event_child);
1242 return lwpid_of (event_child);
0d62e5e8
DJ
1243 }
1244
1245 /* We only enter this loop if no process has a pending wait status. Thus
1246 any action taken in response to a wait status inside this loop is
1247 responding as soon as we detect the status, not after any pending
1248 events. */
1249 while (1)
1250 {
6bf5e0ba 1251 event_child = linux_wait_for_lwp (ptid, wstat, options);
0d62e5e8 1252
bd99dc85 1253 if ((options & WNOHANG) && event_child == NULL)
d50171e4
PA
1254 {
1255 if (debug_threads)
1256 fprintf (stderr, "WNOHANG set, no event found\n");
1257 return 0;
1258 }
0d62e5e8
DJ
1259
1260 if (event_child == NULL)
1261 error ("event from unknown child");
611cb4a5 1262
bd99dc85 1263 current_inferior = get_lwp_thread (event_child);
0d62e5e8 1264
89be2091 1265 /* Check for thread exit. */
bd99dc85 1266 if (! WIFSTOPPED (*wstat))
0d62e5e8 1267 {
89be2091 1268 if (debug_threads)
95954743 1269 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
89be2091
DJ
1270
1271 /* If the last thread is exiting, just return. */
95954743 1272 if (last_thread_of_process_p (current_inferior))
bd99dc85
PA
1273 {
1274 if (debug_threads)
95954743
PA
1275 fprintf (stderr, "LWP %ld is last lwp of process\n",
1276 lwpid_of (event_child));
bd99dc85
PA
1277 return lwpid_of (event_child);
1278 }
89be2091 1279
bd99dc85
PA
1280 if (!non_stop)
1281 {
1282 current_inferior = (struct thread_info *) all_threads.head;
1283 if (debug_threads)
1284 fprintf (stderr, "Current inferior is now %ld\n",
1285 lwpid_of (get_thread_lwp (current_inferior)));
1286 }
1287 else
1288 {
1289 current_inferior = NULL;
1290 if (debug_threads)
1291 fprintf (stderr, "Current inferior is now <NULL>\n");
1292 }
89be2091
DJ
1293
1294 /* If we were waiting for this particular child to do something...
1295 well, it did something. */
bd99dc85 1296 if (requested_child != NULL)
d50171e4
PA
1297 {
1298 int lwpid = lwpid_of (event_child);
1299
1300 /* Cancel the step-over operation --- the thread that
1301 started it is gone. */
1302 if (finish_step_over (event_child))
7984d532 1303 unstop_all_lwps (1, event_child);
d50171e4
PA
1304 delete_lwp (event_child);
1305 return lwpid;
1306 }
1307
1308 delete_lwp (event_child);
89be2091
DJ
1309
1310 /* Wait for a more interesting event. */
1311 continue;
1312 }
1313
a6dbe5df
PA
1314 if (event_child->must_set_ptrace_flags)
1315 {
1e7fc18c 1316 linux_enable_event_reporting (lwpid_of (event_child));
a6dbe5df
PA
1317 event_child->must_set_ptrace_flags = 0;
1318 }
1319
bd99dc85
PA
1320 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1321 && *wstat >> 16 != 0)
24a09b5f 1322 {
bd99dc85 1323 handle_extended_wait (event_child, *wstat);
24a09b5f
DJ
1324 continue;
1325 }
1326
d50171e4
PA
1327 if (WIFSTOPPED (*wstat)
1328 && WSTOPSIG (*wstat) == SIGSTOP
1329 && event_child->stop_expected)
1330 {
1331 int should_stop;
1332
1333 if (debug_threads)
1334 fprintf (stderr, "Expected stop.\n");
1335 event_child->stop_expected = 0;
1336
8336d594 1337 should_stop = (current_inferior->last_resume_kind == resume_stop
d50171e4
PA
1338 || stopping_threads);
1339
1340 if (!should_stop)
1341 {
1342 linux_resume_one_lwp (event_child,
1343 event_child->stepping, 0, NULL);
1344 continue;
1345 }
1346 }
1347
bd99dc85 1348 return lwpid_of (event_child);
611cb4a5 1349 }
0d62e5e8 1350
611cb4a5
DJ
1351 /* NOTREACHED */
1352 return 0;
1353}
1354
/* Wrapper around linux_wait_for_event_1 that additionally supports
   waiting for a whole process (a PTID with only the pid set): waitpid
   cannot wait on a thread-group id directly, so we wait for any child
   and stash events from other processes as pending statuses on their
   LWPs, to be reported later.  Returns the pid of the event child, or
   0 under WNOHANG with no event (see linux_wait_for_event_1).  */

static int
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
{
  ptid_t wait_ptid;

  if (ptid_is_pid (ptid))
    {
      /* A request to wait for a specific tgid.  This is not possible
	 with waitpid, so instead, we wait for any child, and leave
	 children we're not interested in right now with a pending
	 status to report later.  */
      wait_ptid = minus_one_ptid;
    }
  else
    wait_ptid = ptid;

  while (1)
    {
      int event_pid;

      event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);

      if (event_pid > 0
	  && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
	{
	  /* Event from a process other than the requested tgid:
	     record it on that LWP and keep waiting.  */
	  struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));

	  if (! WIFSTOPPED (*wstat))
	    mark_lwp_dead (event_child, *wstat);
	  else
	    {
	      event_child->status_pending_p = 1;
	      event_child->status_pending = *wstat;
	    }
	}
      else
	return event_pid;
    }
}
1394
6bf5e0ba
PA
1395
1396/* Count the LWP's that have had events. */
1397
1398static int
1399count_events_callback (struct inferior_list_entry *entry, void *data)
1400{
1401 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1402 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1403 int *count = data;
1404
1405 gdb_assert (count != NULL);
1406
1407 /* Count only resumed LWPs that have a SIGTRAP event pending that
1408 should be reported to GDB. */
8336d594
PA
1409 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1410 && thread->last_resume_kind != resume_stop
6bf5e0ba
PA
1411 && lp->status_pending_p
1412 && WIFSTOPPED (lp->status_pending)
1413 && WSTOPSIG (lp->status_pending) == SIGTRAP
1414 && !breakpoint_inserted_here (lp->stop_pc))
1415 (*count)++;
1416
1417 return 0;
1418}
1419
1420/* Select the LWP (if any) that is currently being single-stepped. */
1421
1422static int
1423select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1424{
1425 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1426 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba 1427
8336d594
PA
1428 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1429 && thread->last_resume_kind == resume_step
6bf5e0ba
PA
1430 && lp->status_pending_p)
1431 return 1;
1432 else
1433 return 0;
1434}
1435
1436/* Select the Nth LWP that has had a SIGTRAP event that should be
1437 reported to GDB. */
1438
1439static int
1440select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1441{
1442 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1443 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1444 int *selector = data;
1445
1446 gdb_assert (selector != NULL);
1447
1448 /* Select only resumed LWPs that have a SIGTRAP event pending. */
8336d594
PA
1449 if (thread->last_resume_kind != resume_stop
1450 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
6bf5e0ba
PA
1451 && lp->status_pending_p
1452 && WIFSTOPPED (lp->status_pending)
1453 && WSTOPSIG (lp->status_pending) == SIGTRAP
1454 && !breakpoint_inserted_here (lp->stop_pc))
1455 if ((*selector)-- == 0)
1456 return 1;
1457
1458 return 0;
1459}
1460
1461static int
1462cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
1463{
1464 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1465 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1466 struct lwp_info *event_lp = data;
1467
1468 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1469 if (lp == event_lp)
1470 return 0;
1471
1472 /* If a LWP other than the LWP that we're reporting an event for has
1473 hit a GDB breakpoint (as opposed to some random trap signal),
1474 then just arrange for it to hit it again later. We don't keep
1475 the SIGTRAP status and don't forward the SIGTRAP signal to the
1476 LWP. We will handle the current event, eventually we will resume
1477 all LWPs, and this one will get its breakpoint trap again.
1478
1479 If we do not do this, then we run the risk that the user will
1480 delete or disable the breakpoint, but the LWP will have already
1481 tripped on it. */
1482
8336d594
PA
1483 if (thread->last_resume_kind != resume_stop
1484 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
6bf5e0ba
PA
1485 && lp->status_pending_p
1486 && WIFSTOPPED (lp->status_pending)
1487 && WSTOPSIG (lp->status_pending) == SIGTRAP
bdabb078
PA
1488 && !lp->stepping
1489 && !lp->stopped_by_watchpoint
6bf5e0ba
PA
1490 && cancel_breakpoint (lp))
1491 /* Throw away the SIGTRAP. */
1492 lp->status_pending_p = 0;
1493
1494 return 0;
1495}
1496
7984d532
PA
/* Push back (cancel) pending GDB-breakpoint SIGTRAPs on every LWP.
   See cancel_breakpoints_callback for the rationale.  */
static void
linux_cancel_breakpoints (void)
{
  find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
}
1502
/* Select one LWP out of those that have events pending.  On return,
   *ORIG_LP points at the chosen LWP (it is left untouched when no
   candidate is found).  Single-stepping LWPs take priority; otherwise
   one of the SIGTRAP'ed LWPs is chosen at random to avoid starving
   any particular thread.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp;

  /* Give preference to any LWP that is being single-stepped.  */
  event_lp
    = (struct lwp_info *) find_inferior (&all_lwps,
					 select_singlestep_lwp_callback, NULL);
  if (event_lp != NULL)
    {
      if (debug_threads)
	fprintf (stderr,
		 "SEL: Select single-step %s\n",
		 target_pid_to_str (ptid_of (event_lp)));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
         which have had SIGTRAP events.  */

      /* First see how many SIGTRAP events we have.  */
      find_inferior (&all_lwps, count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.
	 The scaling maps rand()'s range uniformly onto
	 [0, num_events).  */
      random_selector = (int)
	((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_threads && num_events > 1)
	fprintf (stderr,
		 "SEL: Found %d SIGTRAP events, selecting #%d\n",
		 num_events, random_selector);

      event_lp = (struct lwp_info *) find_inferior (&all_lwps,
						    select_event_lwp_callback,
						    &random_selector);
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
1551
d50171e4
PA
1552/* Set this inferior LWP's state as "want-stopped". We won't resume
1553 this LWP until the client gives us another action for it. */
1554
1555static void
1556gdb_wants_lwp_stopped (struct inferior_list_entry *entry)
1557{
1558 struct lwp_info *lwp = (struct lwp_info *) entry;
1559 struct thread_info *thread = get_lwp_thread (lwp);
1560
1561 /* Most threads are stopped implicitly (all-stop); tag that with
1562 signal 0. The thread being explicitly reported stopped to the
1563 client, gets it's status fixed up afterwards. */
1564 thread->last_status.kind = TARGET_WAITKIND_STOPPED;
1565 thread->last_status.value.sig = TARGET_SIGNAL_0;
1566
8336d594 1567 thread->last_resume_kind = resume_stop;
d50171e4
PA
1568}
1569
7984d532
PA
1570/* Decrement the suspend count of an LWP. */
1571
1572static int
1573unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
1574{
1575 struct lwp_info *lwp = (struct lwp_info *) entry;
1576
1577 /* Ignore EXCEPT. */
1578 if (lwp == except)
1579 return 0;
1580
1581 lwp->suspended--;
1582
1583 gdb_assert (lwp->suspended >= 0);
1584 return 0;
1585}
1586
/* Decrement the suspend count of all LWPs, except EXCEPT, if non
   NULL.  Used by linux_wait_1 to undo the momentary suspension taken
   while stepping over a breakpoint.  */

static void
unsuspend_all_lwps (struct lwp_info *except)
{
  find_inferior (&all_lwps, unsuspend_one_lwp, except);
}
1595
/* Set all LWP's states as "want-stopped"; see gdb_wants_lwp_stopped.
   Called in all-stop mode, where every thread is implicitly stopped
   when one stop is reported.  */

static void
gdb_wants_all_stopped (void)
{
  for_each_inferior (&all_lwps, gdb_wants_lwp_stopped);
}
1603
/* Wait for process, returns status.

   Core of the target wait loop: waits for an event (honoring any
   step-over in progress), filters out events GDB is not interested in
   (internal breakpoints, tracepoint collection, ignored signals),
   and, when a stop is to be reported, selects the event LWP, stops or
   re-resumes the other threads as appropriate for all-stop/non-stop,
   and fills in *OURSTATUS.  Returns the ptid of the event thread, or
   null_ptid under TARGET_WNOHANG with no event.  */

static ptid_t
linux_wait_1 (ptid_t ptid,
	      struct target_waitstatus *ourstatus, int target_options)
{
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

retry:
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive.  If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads.  We could
     pick a thread at random or restart all; restarting all is less
     arbitrary.  */
  if (!non_stop
      && !ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      struct thread_info *thread;

      thread = (struct thread_info *) find_inferior_id (&all_threads,
							cont_thread);

      /* No stepping, no signal - unless one is pending already, of course.  */
      if (thread == NULL)
	{
	  struct thread_resume resume_info;
	  resume_info.thread = minus_one_ptid;
	  resume_info.kind = resume_continue;
	  resume_info.sig = 0;
	  linux_resume (&resume_info, 1);
	}
      else
	ptid = cont_thread;
    }

  /* While a step-over is in progress, only wait for the thread doing
     it, and do so blockingly -- the step-over must complete before
     anything else can be reported.  */
  if (ptid_equal (step_over_bkpt, null_ptid))
    pid = linux_wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
	fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
		 target_pid_to_str (step_over_bkpt));
      pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0) /* only if TARGET_WNOHANG */
    return null_ptid;

  event_child = get_thread_lwp (current_inferior);

  /* If we are waiting for a particular child, and it exited,
     linux_wait_for_event will return its exit status.  Similarly if
     the last child exited.  If this is not the last child, however,
     do not report it as exited until there is a 'thread exited' response
     available in the remote protocol.  Instead, just wait for another event.
     This should be safe, because if the thread crashed we will already
     have reported the termination signal to GDB; that should stop any
     in-progress stepping operations, etc.

     Report the exit status of the last thread to exit.  This matches
     LinuxThreads' behavior.  */

  if (last_thread_of_process_p (current_inferior))
    {
      if (WIFEXITED (w) || WIFSIGNALED (w))
	{
	  if (WIFEXITED (w))
	    {
	      ourstatus->kind = TARGET_WAITKIND_EXITED;
	      ourstatus->value.integer = WEXITSTATUS (w);

	      if (debug_threads)
		fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
	    }
	  else
	    {
	      ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	      ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));

	      if (debug_threads)
		fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));

	    }

	  return pid_to_ptid (pid);
	}
    }
  else
    {
      if (!WIFSTOPPED (w))
	goto retry;
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && (*the_low_target.breakpoint_at) (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any reinsert (software
	 single-step) breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	{
	  /* If we stepped or ran into an internal breakpoint, we've
	     already handled it.  So next time we resume (from this
	     PC), we should step over it.  */
	  if (debug_threads)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");

	  if (breakpoint_here (event_child->stop_pc))
	    event_child->need_step_over = 1;
	}
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);

      trace_event = 0;
    }

  /* Check whether GDB would be interested in this event.  */

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler.  */
  /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
     thread library?  */
  if (WIFSTOPPED (w)
      && current_inferior->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
	  (current_process ()->private->thread_db != NULL
	   && (WSTOPSIG (w) == __SIGRTMIN
	       || WSTOPSIG (w) == __SIGRTMIN + 1))
	  ||
#endif
	  (pass_signals[target_signal_from_host (WSTOPSIG (w))]
	   && !(WSTOPSIG (w) == SIGSTOP
		&& current_inferior->last_resume_kind == resume_stop))))
    {
      siginfo_t info, *info_p;

      if (debug_threads)
	fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
		 WSTOPSIG (w), lwpid_of (event_child));

      /* Forward the original siginfo along when re-delivering the
	 signal, if we can fetch it.  */
      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;
      linux_resume_one_lwp (event_child, event_child->stepping,
			    WSTOPSIG (w), info_p);
      goto retry;
    }

  /* If GDB wanted this thread to single step, we always want to
     report the SIGTRAP, and let GDB handle it.  Watchpoints should
     always be reported.  So should signals we can't explain.  A
     SIGTRAP we can't explain could be a GDB breakpoint --- we may or
     not support Z0 breakpoints.  If we do, we're be able to handle
     GDB breakpoints on top of internal breakpoints, by handling the
     internal breakpoint and still reporting the event to GDB.  If we
     don't, we're out of luck, GDB won't see the breakpoint hit.  */
  report_to_gdb = (!maybe_internal_trap
		   || current_inferior->last_resume_kind == resume_step
		   || event_child->stopped_by_watchpoint
		   || (!step_over_finished && !bp_explains_trap && !trace_event)
		   || gdb_breakpoint_here (event_child->stop_pc));

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
	{
	  if (bp_explains_trap)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");
	  if (step_over_finished)
	    fprintf (stderr, "Step-over finished.\n");
	  if (trace_event)
	    fprintf (stderr, "Tracepoint event.\n");
	}

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (the_low_target.set_pc != NULL)
	{
	  struct regcache *regcache
	    = get_thread_regcache (get_lwp_thread (event_child), 1);
	  (*the_low_target.set_pc) (regcache, event_child->stop_pc);
	}

      /* We may have finished stepping over a breakpoint.  If so,
	 we've stopped and suspended all LWPs momentarily except the
	 stepping one.  This is where we resume them all again.  We're
	 going to keep waiting, so use proceed, which handles stepping
	 over the next breakpoint.  */
      if (debug_threads)
	fprintf (stderr, "proceeding all threads.\n");

      if (step_over_finished)
	unsuspend_all_lwps (event_child);

      proceed_all_lwps ();
      goto retry;
    }

  if (debug_threads)
    {
      if (current_inferior->last_resume_kind == resume_step)
	fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
      if (event_child->stopped_by_watchpoint)
	fprintf (stderr, "Stopped by watchpoint.\n");
      if (gdb_breakpoint_here (event_child->stop_pc))
	fprintf (stderr, "Stopped by GDB breakpoint.\n");
      if (debug_threads)
	fprintf (stderr, "Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  if (!non_stop)
    {
      /* In all-stop, stop all threads.  */
      stop_all_lwps (0, NULL);

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid_equal (ptid, minus_one_ptid))
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}

      /* Now that we've selected our final event LWP, cancel any
	 breakpoints in other LWPs that have hit a GDB breakpoint.
	 See the comment in cancel_breakpoints_callback to find out
	 why.  */
      find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (1, event_child);
    }

  ourstatus->kind = TARGET_WAITKIND_STOPPED;

  /* Do this before the gdb_wants_all_stopped calls below, since they
     always set last_resume_kind to resume_stop.  */
  if (current_inferior->last_resume_kind == resume_stop
      && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = TARGET_SIGNAL_0;
    }
  else if (current_inferior->last_resume_kind == resume_stop
	   && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons.  */
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }
  else
    {
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }

  /* Any in-progress step-over must have been finished (or cancelled)
     by the time we report a stop.  */
  gdb_assert (ptid_equal (step_over_bkpt, null_ptid));

  if (!non_stop)
    {
      /* From GDB's perspective, all-stop mode always stops all
	 threads implicitly.  Tag all threads as "want-stopped".  */
      gdb_wants_all_stopped ();
    }
  else
    {
      /* We're reporting this LWP as stopped.  Update it's
	 "want-stopped" state to what the client wants, until it gets
	 a new resume action.  */
      gdb_wants_lwp_stopped (&event_child->head);
    }

  if (debug_threads)
    fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
	     target_pid_to_str (ptid_of (event_child)),
	     ourstatus->kind,
	     ourstatus->value.sig);

  current_inferior->last_status = *ourstatus;

  return ptid_of (event_child);
}
1966
1967/* Get rid of any pending event in the pipe. */
1968static void
1969async_file_flush (void)
1970{
1971 int ret;
1972 char buf;
1973
1974 do
1975 ret = read (linux_event_pipe[0], &buf, 1);
1976 while (ret >= 0 || (ret == -1 && errno == EINTR));
1977}
1978
1979/* Put something in the pipe, so the event loop wakes up. */
1980static void
1981async_file_mark (void)
1982{
1983 int ret;
1984
1985 async_file_flush ();
1986
1987 do
1988 ret = write (linux_event_pipe[1], "+", 1);
1989 while (ret == 0 || (ret == -1 && errno == EINTR));
1990
1991 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1992 be awakened anyway. */
1993}
1994
95954743
PA
/* Target "wait" entry point.  Waits for an event in the process(es)
   selected by PTID (minus_one_ptid means "any"), fills in OURSTATUS,
   and returns the ptid of the LWP that reported the event.
   TARGET_OPTIONS may include TARGET_WNOHANG for a non-blocking
   poll.  */

static ptid_t
linux_wait (ptid_t ptid,
	    struct target_waitstatus *ourstatus, int target_options)
{
  ptid_t event_ptid;

  if (debug_threads)
    fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));

  /* Flush the async file first.  */
  if (target_is_async_p ())
    async_file_flush ();

  event_ptid = linux_wait_1 (ptid, ourstatus, target_options);

  /* If at least one stop was reported, there may be more.  A single
     SIGCHLD can signal more than one child stop.  Re-mark the pipe so
     the event loop calls us again to pick up the rest.  */
  if (target_is_async_p ()
      && (target_options & TARGET_WNOHANG) != 0
      && !ptid_equal (event_ptid, null_ptid))
    async_file_mark ();

  return event_ptid;
}
2019
c5f62d5f 2020/* Send a signal to an LWP. */
fd500816
DJ
2021
2022static int
a1928bad 2023kill_lwp (unsigned long lwpid, int signo)
fd500816 2024{
c5f62d5f
DE
2025 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2026 fails, then we are not using nptl threads and we should be using kill. */
fd500816 2027
c5f62d5f
DE
2028#ifdef __NR_tkill
2029 {
2030 static int tkill_failed;
fd500816 2031
c5f62d5f
DE
2032 if (!tkill_failed)
2033 {
2034 int ret;
2035
2036 errno = 0;
2037 ret = syscall (__NR_tkill, lwpid, signo);
2038 if (errno != ENOSYS)
2039 return ret;
2040 tkill_failed = 1;
2041 }
2042 }
fd500816
DJ
2043#endif
2044
2045 return kill (lwpid, signo);
2046}
2047
0d62e5e8 2048static void
02fc4de7 2049send_sigstop (struct lwp_info *lwp)
0d62e5e8 2050{
bd99dc85 2051 int pid;
0d62e5e8 2052
bd99dc85
PA
2053 pid = lwpid_of (lwp);
2054
0d62e5e8
DJ
2055 /* If we already have a pending stop signal for this process, don't
2056 send another. */
54a0b537 2057 if (lwp->stop_expected)
0d62e5e8 2058 {
ae13219e 2059 if (debug_threads)
bd99dc85 2060 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
ae13219e 2061
0d62e5e8
DJ
2062 return;
2063 }
2064
2065 if (debug_threads)
bd99dc85 2066 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
0d62e5e8 2067
d50171e4 2068 lwp->stop_expected = 1;
bd99dc85 2069 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
2070}
2071
7984d532
PA
2072static int
2073send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02fc4de7
PA
2074{
2075 struct lwp_info *lwp = (struct lwp_info *) entry;
2076
7984d532
PA
2077 /* Ignore EXCEPT. */
2078 if (lwp == except)
2079 return 0;
2080
02fc4de7 2081 if (lwp->stopped)
7984d532 2082 return 0;
02fc4de7
PA
2083
2084 send_sigstop (lwp);
7984d532
PA
2085 return 0;
2086}
2087
2088/* Increment the suspend count of an LWP, and stop it, if not stopped
2089 yet. */
2090static int
2091suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2092 void *except)
2093{
2094 struct lwp_info *lwp = (struct lwp_info *) entry;
2095
2096 /* Ignore EXCEPT. */
2097 if (lwp == except)
2098 return 0;
2099
2100 lwp->suspended++;
2101
2102 return send_sigstop_callback (entry, except);
02fc4de7
PA
2103}
2104
95954743
PA
2105static void
2106mark_lwp_dead (struct lwp_info *lwp, int wstat)
2107{
2108 /* It's dead, really. */
2109 lwp->dead = 1;
2110
2111 /* Store the exit status for later. */
2112 lwp->status_pending_p = 1;
2113 lwp->status_pending = wstat;
2114
95954743
PA
2115 /* Prevent trying to stop it. */
2116 lwp->stopped = 1;
2117
2118 /* No further stops are expected from a dead lwp. */
2119 lwp->stop_expected = 0;
2120}
2121
0d62e5e8
DJ
/* for_each_inferior callback used by stop_all_lwps: synchronously
   pull one wait event from LWP, which is expected to be stopping due
   to a SIGSTOP we sent.  A non-SIGSTOP stop is stashed as the LWP's
   pending status; an exit is recorded via mark_lwp_dead.  Saves and
   restores current_inferior around the wait, since
   linux_wait_for_event can switch it.  */
static void
wait_for_sigstop (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *saved_inferior;
  int wstat;
  ptid_t saved_tid;
  ptid_t ptid;
  int pid;

  if (lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
		 lwpid_of (lwp));
      return;
    }

  saved_inferior = current_inferior;
  if (saved_inferior != NULL)
    saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  ptid = lwp->head.id;

  if (debug_threads)
    fprintf (stderr, "wait_for_sigstop: pulling one event\n");

  /* Block until this specific LWP reports something.  */
  pid = linux_wait_for_event (ptid, &wstat, __WALL);

  /* If we stopped with a non-SIGSTOP signal, save it for later
     and record the pending SIGSTOP.  If the process exited, just
     return.  */
  if (WIFSTOPPED (wstat))
    {
      if (debug_threads)
	fprintf (stderr, "LWP %ld stopped with signal %d\n",
		 lwpid_of (lwp), WSTOPSIG (wstat));

      if (WSTOPSIG (wstat) != SIGSTOP)
	{
	  if (debug_threads)
	    fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
		     lwpid_of (lwp), wstat);

	  /* Not the SIGSTOP we were expecting — keep it so it can be
	     reported to GDB on a later wait.  */
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);

      /* Re-look the LWP up: linux_wait_for_event may have removed it
	 from the tables unless it was the last thread of the
	 process.  */
      lwp = find_lwp_pid (pid_to_ptid (pid));
      if (lwp)
	{
	  /* Leave this status pending for the next time we're able to
	     report it.  In the mean time, we'll report this lwp as
	     dead to GDB, so GDB doesn't try to read registers and
	     memory from it.  This can only happen if this was the
	     last thread of the process; otherwise, PID is removed
	     from the thread tables before linux_wait_for_event
	     returns.  */
	  mark_lwp_dead (lwp, wstat);
	}
    }

  /* Restore the previously-current thread — unless it died while we
     were waiting, in which case pick a sane replacement.  */
  if (saved_inferior == NULL || linux_thread_alive (saved_tid))
    current_inferior = saved_inferior;
  else
    {
      if (debug_threads)
	fprintf (stderr, "Previously current thread died.\n");

      if (non_stop)
	{
	  /* We can't change the current inferior behind GDB's back,
	     otherwise, a subsequent command may apply to the wrong
	     process.  */
	  current_inferior = NULL;
	}
      else
	{
	  /* Set a valid thread as current.  */
	  set_desired_inferior (0);
	}
    }
}
2212
7984d532
PA
2213/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
2214 If SUSPEND, then also increase the suspend count of every LWP,
2215 except EXCEPT. */
2216
0d62e5e8 2217static void
7984d532 2218stop_all_lwps (int suspend, struct lwp_info *except)
0d62e5e8
DJ
2219{
2220 stopping_threads = 1;
7984d532
PA
2221
2222 if (suspend)
2223 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
2224 else
2225 find_inferior (&all_lwps, send_sigstop_callback, except);
54a0b537 2226 for_each_inferior (&all_lwps, wait_for_sigstop);
0d62e5e8
DJ
2227 stopping_threads = 0;
2228}
2229
da6d8c04
DJ
/* Resume execution of the inferior process.
   If STEP is nonzero, single-step it.
   If SIGNAL is nonzero, give it that signal.
   INFO, if non-NULL, is the siginfo to attach to SIGNAL (it is queued
   along with the signal when delivery must be postponed).  */

static void
linux_resume_one_lwp (struct lwp_info *lwp,
		      int step, int signal, siginfo_t *info)
{
  struct thread_info *saved_inferior;

  /* Nothing to do for an LWP that is already running.  */
  if (lwp->stopped == 0)
    return;

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (get_lwp_thread (lwp));
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if we are waiting to reinsert a
     breakpoint; it will be picked up again below.  */
  if (signal != 0
      && (lwp->status_pending_p || lwp->pending_signals != NULL
	  || lwp->bp_reinsert != 0))
    {
      struct pending_signals *p_sig;
      p_sig = xmalloc (sizeof (*p_sig));
      p_sig->prev = lwp->pending_signals;
      p_sig->signal = signal;
      if (info == NULL)
	memset (&p_sig->info, 0, sizeof (siginfo_t));
      else
	memcpy (&p_sig->info, info, sizeof (siginfo_t));
      lwp->pending_signals = p_sig;
    }

  /* A pending status must be reported before resuming; leave the LWP
     stopped.  */
  if (lwp->status_pending_p)
    {
      if (debug_threads)
	fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
		 " has pending status\n",
		 lwpid_of (lwp), step ? "step" : "continue", signal,
		 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
	     lwpid_of (lwp), step ? "step" : "continue", signal,
	     lwp->stop_expected ? "expected" : "not expected");

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	fprintf (stderr, "  pending reinsert at 0x%s\n",
		 paddress (lwp->bp_reinsert));

      if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
	{
	  /* A hardware-single-step step-over must actually step, and
	     must not be running while other threads are suspended on
	     its behalf — both would indicate a logic error.  */
	  if (step == 0)
	    fprintf (stderr, "BAD - reinserting but not stepping.\n");
	  if (lwp->suspended)
	    fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
		     lwp->suspended);

	  step = 1;
	}

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (get_lwp_thread (lwp)->while_stepping != NULL
      && can_hardware_single_step ())
    {
      if (debug_threads)
	fprintf (stderr,
		 "lwp %ld has a while-stepping action -> forcing step.\n",
		 lwpid_of (lwp));
      step = 1;
    }

  if (debug_threads && the_low_target.get_pc != NULL)
    {
      struct regcache *regcache = get_thread_regcache (current_inferior, 1);
      CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "  resuming from pc 0x%lx\n", (long) pc);
    }

  /* If we have pending signals, consume one unless we are trying to reinsert
     a breakpoint.  The oldest queued signal (tail of the list) is
     delivered first.  */
  if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);

  /* Cached register contents go stale the moment the LWP runs.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));
  errno = 0;
  lwp->stopped = 0;
  lwp->stopped_by_watchpoint = 0;
  lwp->stepping = step;
  ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_ARG4_TYPE) (uintptr_t) signal);

  current_inferior = saved_inferior;
  if (errno)
    {
      /* ESRCH from ptrace either means that the thread was already
	 running (an error) or that it is gone (a race condition).  If
	 it's gone, we will get a notification the next time we wait,
	 so we can ignore the error.  We could differentiate these
	 two, but it's tricky without waiting; the thread still exists
	 as a zombie, so sending it signal 0 would succeed.  So just
	 ignore ESRCH.  */
      if (errno == ESRCH)
	return;

      perror_with_name ("ptrace");
    }
}
2392
2bd7c093
PA
/* Bundles the resume-request array GDB handed to linux_resume with
   its element count, so both fit through the single VOID* argument of
   the find_inferior callbacks.  */
struct thread_resume_array
{
  struct thread_resume *resume;	/* Array of resume requests.  */
  size_t n;			/* Number of elements in RESUME.  */
};
64386c31
DJ
2398
2399/* This function is called once per thread. We look up the thread
5544ad89
DJ
2400 in RESUME_PTR, and mark the thread with a pointer to the appropriate
2401 resume request.
2402
2403 This algorithm is O(threads * resume elements), but resume elements
2404 is small (and will remain small at least until GDB supports thread
2405 suspension). */
2bd7c093
PA
2406static int
2407linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
0d62e5e8 2408{
54a0b537 2409 struct lwp_info *lwp;
64386c31 2410 struct thread_info *thread;
5544ad89 2411 int ndx;
2bd7c093 2412 struct thread_resume_array *r;
64386c31
DJ
2413
2414 thread = (struct thread_info *) entry;
54a0b537 2415 lwp = get_thread_lwp (thread);
2bd7c093 2416 r = arg;
64386c31 2417
2bd7c093 2418 for (ndx = 0; ndx < r->n; ndx++)
95954743
PA
2419 {
2420 ptid_t ptid = r->resume[ndx].thread;
2421 if (ptid_equal (ptid, minus_one_ptid)
2422 || ptid_equal (ptid, entry->id)
2423 || (ptid_is_pid (ptid)
2424 && (ptid_get_pid (ptid) == pid_of (lwp)))
2425 || (ptid_get_lwp (ptid) == -1
2426 && (ptid_get_pid (ptid) == pid_of (lwp))))
2427 {
d50171e4 2428 if (r->resume[ndx].kind == resume_stop
8336d594 2429 && thread->last_resume_kind == resume_stop)
d50171e4
PA
2430 {
2431 if (debug_threads)
2432 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
2433 thread->last_status.kind == TARGET_WAITKIND_STOPPED
2434 ? "stopped"
2435 : "stopping",
2436 lwpid_of (lwp));
2437
2438 continue;
2439 }
2440
95954743 2441 lwp->resume = &r->resume[ndx];
8336d594 2442 thread->last_resume_kind = lwp->resume->kind;
95954743
PA
2443 return 0;
2444 }
2445 }
2bd7c093
PA
2446
2447 /* No resume action for this thread. */
2448 lwp->resume = NULL;
64386c31 2449
2bd7c093 2450 return 0;
5544ad89
DJ
2451}
2452
5544ad89 2453
bd99dc85
PA
2454/* Set *FLAG_P if this lwp has an interesting status pending. */
2455static int
2456resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 2457{
bd99dc85 2458 struct lwp_info *lwp = (struct lwp_info *) entry;
5544ad89 2459
bd99dc85
PA
2460 /* LWPs which will not be resumed are not interesting, because
2461 we might not wait for them next time through linux_wait. */
2bd7c093 2462 if (lwp->resume == NULL)
bd99dc85 2463 return 0;
64386c31 2464
bd99dc85 2465 if (lwp->status_pending_p)
d50171e4
PA
2466 * (int *) flag_p = 1;
2467
2468 return 0;
2469}
2470
/* Return 1 if this lwp that GDB wants running is stopped at an
   internal breakpoint that we need to step over.  It assumes that any
   required STOP_PC adjustment has already been propagated to the
   inferior's regcache.  */

static int
need_step_over_p (struct inferior_list_entry *entry, void *dummy)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread;
  struct thread_info *saved_inferior;
  CORE_ADDR pc;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, not stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  thread = get_lwp_thread (lwp);

  /* GDB asked this thread to stay stopped; no step-over wanted.  */
  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, suspended\n",
		 lwpid_of (lwp));
      return 0;
    }

  /* NOTE(review): this branch only logs and falls through — there is
     no "return 0" here, so the breakpoint_here check below still runs
     even when need_step_over is unset.  Presumably deliberate (a
     thread stopped exactly at a breakpoint address still needs the
     dance), but worth confirming against later gdbserver history.  */
  if (!lwp->need_step_over)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
		 lwpid_of (lwp));
      return 0;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Cancelling, PC was changed. "
		 "Old stop_pc was 0x%s, PC is now 0x%s\n",
		 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));

      lwp->need_step_over = 0;
      return 0;
    }

  /* breakpoint_here/gdb_breakpoint_here consult the current thread's
     process, so temporarily make this thread current.  */
  saved_inferior = current_inferior;
  current_inferior = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  */
      if (gdb_breakpoint_here (pc))
	{
	  if (debug_threads)
	    fprintf (stderr,
		     "Need step over [LWP %ld]? yes, but found"
		     " GDB breakpoint at 0x%s; skipping step over\n",
		     lwpid_of (lwp), paddress (pc));

	  current_inferior = saved_inferior;
	  return 0;
	}
      else
	{
	  if (debug_threads)
	    fprintf (stderr,
		     "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n",
		     lwpid_of (lwp), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_inferior stops looking.  */
	  current_inferior = saved_inferior;

	  /* If the step over is cancelled, this is set again.  */
	  lwp->need_step_over = 0;
	  return 1;
	}
    }

  current_inferior = saved_inferior;

  if (debug_threads)
    fprintf (stderr,
	     "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
	     lwpid_of (lwp), paddress (pc));

  return 0;
}
2600
d50171e4
PA
/* Start a step-over operation on LWP.  When LWP stopped at a
   breakpoint, to make progress, we need to remove the breakpoint out
   of the way.  If we let other threads run while we do that, they may
   pass by the breakpoint location and miss hitting it.  To avoid
   that, a step-over momentarily stops all threads while LWP is
   single-stepped while the breakpoint is temporarily uninserted from
   the inferior.  When the single-step finishes, we reinsert the
   breakpoint, and let all threads that are supposed to be running,
   run again.

   On targets that don't support hardware single-step, we don't
   currently support full software single-stepping.  Instead, we only
   support stepping over the thread event breakpoint, by asking the
   low target where to place a reinsert breakpoint.  Since this
   routine assumes the breakpoint being stepped over is a thread event
   breakpoint, it usually assumes the return address of the current
   function is a good enough place to set the reinsert breakpoint.  */

static int
start_step_over (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    fprintf (stderr,
	     "Starting step-over on LWP %ld.  Stopping all threads\n",
	     lwpid_of (lwp));

  /* Suspend-stop everyone but LWP; they stay put until the step-over
     finishes.  */
  stop_all_lwps (1, lwp);
  gdb_assert (lwp->suspended == 0);

  if (debug_threads)
    fprintf (stderr, "Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  /* Remember where to reinsert, then lift the breakpoint so the
     original instruction can execute.  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);

  if (can_hardware_single_step ())
    {
      step = 1;
    }
  else
    {
      /* No hardware single-step: plant a temporary breakpoint at the
	 address the low target suggests and just continue.  */
      CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
      set_reinsert_breakpoint (raddr);
      step = 0;
    }

  current_inferior = saved_inferior;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = lwp->head.id;
  return 1;
}
2668
2669/* Finish a step-over. Reinsert the breakpoint we had uninserted in
2670 start_step_over, if still there, and delete any reinsert
2671 breakpoints we've set, on non hardware single-step targets. */
2672
2673static int
2674finish_step_over (struct lwp_info *lwp)
2675{
2676 if (lwp->bp_reinsert != 0)
2677 {
2678 if (debug_threads)
2679 fprintf (stderr, "Finished step over.\n");
2680
2681 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
2682 may be no breakpoint to reinsert there by now. */
2683 reinsert_breakpoints_at (lwp->bp_reinsert);
2684
2685 lwp->bp_reinsert = 0;
2686
2687 /* Delete any software-single-step reinsert breakpoints. No
2688 longer needed. We don't have to worry about other threads
2689 hitting this trap, and later not being able to explain it,
2690 because we were stepping over a breakpoint, and we hold all
2691 threads but LWP stopped while doing that. */
2692 if (!can_hardware_single_step ())
2693 delete_reinsert_breakpoints ();
2694
2695 step_over_bkpt = null_ptid;
2696 return 1;
2697 }
2698 else
2699 return 0;
2700}
2701
5544ad89
DJ
/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.  */

static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp;
  struct thread_info *thread;
  int step;
  /* ARG points at the leave_all_stopped flag computed by
     linux_resume.  */
  int leave_all_stopped = * (int *) arg;
  int leave_pending;

  thread = (struct thread_info *) entry;
  lwp = get_thread_lwp (thread);

  /* No resume request applied to this thread.  */
  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    fprintf (stderr, "already stopped LWP %ld\n",
		     lwpid_of (lwp));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */
	  send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return 0;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume any threads - we can just report the pending
     status.  Make sure to queue any signals that would otherwise be
     sent.  In all-stop mode, we do this decision based on if *any*
     thread has a pending status.  If there's a thread that needs the
     step-over-breakpoint dance, then don't resume any other thread
     but that particular one.  */
  leave_pending = (lwp->status_pending_p || leave_all_stopped);

  if (!leave_pending)
    {
      if (debug_threads)
	fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));

      step = (lwp->resume->kind == resume_step);
      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
	{
	  struct pending_signals *p_sig;
	  p_sig = xmalloc (sizeof (*p_sig));
	  p_sig->prev = lwp->pending_signals;
	  p_sig->signal = lwp->resume->sig;
	  memset (&p_sig->info, 0, sizeof (siginfo_t));

	  /* If this is the same signal we were previously stopped by,
	     make sure to queue its siginfo.  We can ignore the return
	     value of ptrace; if it fails, we'll skip
	     PTRACE_SETSIGINFO.  */
	  if (WIFSTOPPED (lwp->last_status)
	      && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
	    ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

	  lwp->pending_signals = p_sig;
	}
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
  return 0;
}
2818
/* Target "resume" entry point: apply the N resume requests in
   RESUME_INFO to all threads.  In all-stop mode, if any resumable
   thread already has a pending status, or some thread must first step
   over a breakpoint, every thread is left stopped (with its requested
   signal queued) instead of being resumed.  */
static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_resume_array array = { resume_info, n };
  struct lwp_info *need_step_over = NULL;
  int any_pending;
  int leave_all_stopped;

  /* Attach the matching resume request (if any) to each thread.  */
  find_inferior (&all_threads, linux_set_resume_request, &array);

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  any_pending = 0;
  if (!non_stop)
    find_inferior (&all_lwps, resume_status_pending_p, &any_pending);

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over
      = (struct lwp_info *) find_inferior (&all_lwps,
					   need_step_over_p, NULL);

  leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	fprintf (stderr, "Not resuming all, need step over\n");
      else if (any_pending)
	fprintf (stderr,
		 "Not resuming, all-stop and found "
		 "an LWP with pending status\n");
      else
	fprintf (stderr, "Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);

  if (need_step_over)
    start_step_over (need_step_over);
}
2871
2872/* This function is called once per thread. We check the thread's
2873 last resume request, which will tell us whether to resume, step, or
2874 leave the thread stopped. Any signal the client requested to be
2875 delivered has already been enqueued at this point.
2876
2877 If any thread that GDB wants running is stopped at an internal
2878 breakpoint that needs stepping over, we start a step-over operation
2879 on that particular thread, and leave all others stopped. */
2880
7984d532
PA
2881static int
2882proceed_one_lwp (struct inferior_list_entry *entry, void *except)
d50171e4 2883{
7984d532 2884 struct lwp_info *lwp = (struct lwp_info *) entry;
8336d594 2885 struct thread_info *thread;
d50171e4
PA
2886 int step;
2887
7984d532
PA
2888 if (lwp == except)
2889 return 0;
d50171e4
PA
2890
2891 if (debug_threads)
2892 fprintf (stderr,
2893 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
2894
2895 if (!lwp->stopped)
2896 {
2897 if (debug_threads)
2898 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
7984d532 2899 return 0;
d50171e4
PA
2900 }
2901
8336d594
PA
2902 thread = get_lwp_thread (lwp);
2903
02fc4de7
PA
2904 if (thread->last_resume_kind == resume_stop
2905 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
d50171e4
PA
2906 {
2907 if (debug_threads)
02fc4de7
PA
2908 fprintf (stderr, " client wants LWP to remain %ld stopped\n",
2909 lwpid_of (lwp));
7984d532 2910 return 0;
d50171e4
PA
2911 }
2912
2913 if (lwp->status_pending_p)
2914 {
2915 if (debug_threads)
2916 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
2917 lwpid_of (lwp));
7984d532 2918 return 0;
d50171e4
PA
2919 }
2920
7984d532
PA
2921 gdb_assert (lwp->suspended >= 0);
2922
d50171e4
PA
2923 if (lwp->suspended)
2924 {
2925 if (debug_threads)
2926 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
7984d532 2927 return 0;
d50171e4
PA
2928 }
2929
02fc4de7
PA
2930 if (thread->last_resume_kind == resume_stop)
2931 {
2932 /* We haven't reported this LWP as stopped yet (otherwise, the
2933 last_status.kind check above would catch it, and we wouldn't
2934 reach here. This LWP may have been momentarily paused by a
2935 stop_all_lwps call while handling for example, another LWP's
2936 step-over. In that case, the pending expected SIGSTOP signal
2937 that was queued at vCont;t handling time will have already
2938 been consumed by wait_for_sigstop, and so we need to requeue
2939 another one here. Note that if the LWP already has a SIGSTOP
2940 pending, this is a no-op. */
2941
2942 if (debug_threads)
2943 fprintf (stderr,
2944 "Client wants LWP %ld to stop. "
2945 "Making sure it has a SIGSTOP pending\n",
2946 lwpid_of (lwp));
2947
2948 send_sigstop (lwp);
2949 }
2950
8336d594 2951 step = thread->last_resume_kind == resume_step;
d50171e4 2952 linux_resume_one_lwp (lwp, step, 0, NULL);
7984d532
PA
2953 return 0;
2954}
2955
2956static int
2957unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
2958{
2959 struct lwp_info *lwp = (struct lwp_info *) entry;
2960
2961 if (lwp == except)
2962 return 0;
2963
2964 lwp->suspended--;
2965 gdb_assert (lwp->suspended >= 0);
2966
2967 return proceed_one_lwp (entry, except);
d50171e4
PA
2968}
2969
2970/* When we finish a step-over, set threads running again. If there's
2971 another thread that may need a step-over, now's the time to start
2972 it. Eventually, we'll move all threads past their breakpoints. */
2973
2974static void
2975proceed_all_lwps (void)
2976{
2977 struct lwp_info *need_step_over;
2978
2979 /* If there is a thread which would otherwise be resumed, which is
2980 stopped at a breakpoint that needs stepping over, then don't
2981 resume any threads - have it step over the breakpoint with all
2982 other threads stopped, then resume all threads again. */
2983
2984 if (supports_breakpoints ())
2985 {
2986 need_step_over
2987 = (struct lwp_info *) find_inferior (&all_lwps,
2988 need_step_over_p, NULL);
2989
2990 if (need_step_over != NULL)
2991 {
2992 if (debug_threads)
2993 fprintf (stderr, "proceed_all_lwps: found "
2994 "thread %ld needing a step-over\n",
2995 lwpid_of (need_step_over));
2996
2997 start_step_over (need_step_over);
2998 return;
2999 }
3000 }
5544ad89 3001
d50171e4
PA
3002 if (debug_threads)
3003 fprintf (stderr, "Proceeding, no step-over needed\n");
3004
7984d532 3005 find_inferior (&all_lwps, proceed_one_lwp, NULL);
d50171e4
PA
3006}
3007
3008/* Stopped LWPs that the client wanted to be running, that don't have
3009 pending statuses, are set to run again, except for EXCEPT, if not
3010 NULL. This undoes a stop_all_lwps call. */
3011
3012static void
7984d532 3013unstop_all_lwps (int unsuspend, struct lwp_info *except)
d50171e4 3014{
5544ad89
DJ
3015 if (debug_threads)
3016 {
d50171e4
PA
3017 if (except)
3018 fprintf (stderr,
3019 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
5544ad89 3020 else
d50171e4
PA
3021 fprintf (stderr,
3022 "unstopping all lwps\n");
5544ad89
DJ
3023 }
3024
7984d532
PA
3025 if (unsuspend)
3026 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3027 else
3028 find_inferior (&all_lwps, proceed_one_lwp, except);
0d62e5e8
DJ
3029}
3030
3031#ifdef HAVE_LINUX_USRREGS
da6d8c04
DJ
3032
3033int
0a30fbc4 3034register_addr (int regnum)
da6d8c04
DJ
3035{
3036 int addr;
3037
2ec06d2e 3038 if (regnum < 0 || regnum >= the_low_target.num_regs)
da6d8c04
DJ
3039 error ("Invalid register number %d.", regnum);
3040
2ec06d2e 3041 addr = the_low_target.regmap[regnum];
da6d8c04
DJ
3042
3043 return addr;
3044}
3045
58caa3dc 3046/* Fetch one register. */
da6d8c04 3047static void
442ea881 3048fetch_register (struct regcache *regcache, int regno)
da6d8c04
DJ
3049{
3050 CORE_ADDR regaddr;
48d93c75 3051 int i, size;
0d62e5e8 3052 char *buf;
95954743 3053 int pid;
da6d8c04 3054
2ec06d2e 3055 if (regno >= the_low_target.num_regs)
0a30fbc4 3056 return;
2ec06d2e 3057 if ((*the_low_target.cannot_fetch_register) (regno))
0a30fbc4 3058 return;
da6d8c04 3059
0a30fbc4
DJ
3060 regaddr = register_addr (regno);
3061 if (regaddr == -1)
3062 return;
95954743
PA
3063
3064 pid = lwpid_of (get_thread_lwp (current_inferior));
1b3f6016
PA
3065 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3066 & - sizeof (PTRACE_XFER_TYPE));
48d93c75
UW
3067 buf = alloca (size);
3068 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
da6d8c04
DJ
3069 {
3070 errno = 0;
0d62e5e8 3071 *(PTRACE_XFER_TYPE *) (buf + i) =
14ce3065
DE
3072 ptrace (PTRACE_PEEKUSER, pid,
3073 /* Coerce to a uintptr_t first to avoid potential gcc warning
3074 of coercing an 8 byte integer to a 4 byte pointer. */
3075 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
da6d8c04
DJ
3076 regaddr += sizeof (PTRACE_XFER_TYPE);
3077 if (errno != 0)
f52cd8cd 3078 error ("reading register %d: %s", regno, strerror (errno));
da6d8c04 3079 }
ee1a7ae4
UW
3080
3081 if (the_low_target.supply_ptrace_register)
442ea881 3082 the_low_target.supply_ptrace_register (regcache, regno, buf);
5a1f5858 3083 else
442ea881 3084 supply_register (regcache, regno, buf);
da6d8c04
DJ
3085}
3086
3087/* Fetch all registers, or just one, from the child process. */
58caa3dc 3088static void
442ea881 3089usr_fetch_inferior_registers (struct regcache *regcache, int regno)
da6d8c04 3090{
4463ce24 3091 if (regno == -1)
2ec06d2e 3092 for (regno = 0; regno < the_low_target.num_regs; regno++)
442ea881 3093 fetch_register (regcache, regno);
da6d8c04 3094 else
442ea881 3095 fetch_register (regcache, regno);
da6d8c04
DJ
3096}
3097
3098/* Store our register values back into the inferior.
3099 If REGNO is -1, do this for all registers.
3100 Otherwise, REGNO specifies which register (so we can save time). */
58caa3dc 3101static void
442ea881 3102usr_store_inferior_registers (struct regcache *regcache, int regno)
da6d8c04
DJ
3103{
3104 CORE_ADDR regaddr;
48d93c75 3105 int i, size;
0d62e5e8 3106 char *buf;
55ac2b99 3107 int pid;
da6d8c04
DJ
3108
3109 if (regno >= 0)
3110 {
2ec06d2e 3111 if (regno >= the_low_target.num_regs)
0a30fbc4
DJ
3112 return;
3113
bc1e36ca 3114 if ((*the_low_target.cannot_store_register) (regno) == 1)
0a30fbc4
DJ
3115 return;
3116
3117 regaddr = register_addr (regno);
3118 if (regaddr == -1)
da6d8c04 3119 return;
da6d8c04 3120 errno = 0;
48d93c75
UW
3121 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3122 & - sizeof (PTRACE_XFER_TYPE);
3123 buf = alloca (size);
3124 memset (buf, 0, size);
ee1a7ae4
UW
3125
3126 if (the_low_target.collect_ptrace_register)
442ea881 3127 the_low_target.collect_ptrace_register (regcache, regno, buf);
5a1f5858 3128 else
442ea881 3129 collect_register (regcache, regno, buf);
ee1a7ae4 3130
95954743 3131 pid = lwpid_of (get_thread_lwp (current_inferior));
48d93c75 3132 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
da6d8c04 3133 {
0a30fbc4 3134 errno = 0;
14ce3065
DE
3135 ptrace (PTRACE_POKEUSER, pid,
3136 /* Coerce to a uintptr_t first to avoid potential gcc warning
3137 about coercing an 8 byte integer to a 4 byte pointer. */
3138 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
3139 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
da6d8c04
DJ
3140 if (errno != 0)
3141 {
1b3f6016
PA
3142 /* At this point, ESRCH should mean the process is
3143 already gone, in which case we simply ignore attempts
3144 to change its registers. See also the related
3145 comment in linux_resume_one_lwp. */
3221518c
UW
3146 if (errno == ESRCH)
3147 return;
3148
bc1e36ca 3149 if ((*the_low_target.cannot_store_register) (regno) == 0)
f52cd8cd 3150 error ("writing register %d: %s", regno, strerror (errno));
da6d8c04 3151 }
2ff29de4 3152 regaddr += sizeof (PTRACE_XFER_TYPE);
da6d8c04 3153 }
da6d8c04
DJ
3154 }
3155 else
2ec06d2e 3156 for (regno = 0; regno < the_low_target.num_regs; regno++)
442ea881 3157 usr_store_inferior_registers (regcache, regno);
da6d8c04 3158}
58caa3dc
DJ
3159#endif /* HAVE_LINUX_USRREGS */
3160
3161
3162
3163#ifdef HAVE_LINUX_REGSETS
3164
3165static int
442ea881 3166regsets_fetch_inferior_registers (struct regcache *regcache)
58caa3dc
DJ
3167{
3168 struct regset_info *regset;
e9d25b98 3169 int saw_general_regs = 0;
95954743 3170 int pid;
1570b33e 3171 struct iovec iov;
58caa3dc
DJ
3172
3173 regset = target_regsets;
3174
95954743 3175 pid = lwpid_of (get_thread_lwp (current_inferior));
58caa3dc
DJ
3176 while (regset->size >= 0)
3177 {
1570b33e
L
3178 void *buf, *data;
3179 int nt_type, res;
58caa3dc 3180
52fa2412 3181 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
58caa3dc
DJ
3182 {
3183 regset ++;
3184 continue;
3185 }
3186
bca929d3 3187 buf = xmalloc (regset->size);
1570b33e
L
3188
3189 nt_type = regset->nt_type;
3190 if (nt_type)
3191 {
3192 iov.iov_base = buf;
3193 iov.iov_len = regset->size;
3194 data = (void *) &iov;
3195 }
3196 else
3197 data = buf;
3198
dfb64f85 3199#ifndef __sparc__
1570b33e 3200 res = ptrace (regset->get_request, pid, nt_type, data);
dfb64f85 3201#else
1570b33e 3202 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 3203#endif
58caa3dc
DJ
3204 if (res < 0)
3205 {
3206 if (errno == EIO)
3207 {
52fa2412
UW
3208 /* If we get EIO on a regset, do not try it again for
3209 this process. */
3210 disabled_regsets[regset - target_regsets] = 1;
fdeb2a12 3211 free (buf);
52fa2412 3212 continue;
58caa3dc
DJ
3213 }
3214 else
3215 {
0d62e5e8 3216 char s[256];
95954743
PA
3217 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3218 pid);
0d62e5e8 3219 perror (s);
58caa3dc
DJ
3220 }
3221 }
e9d25b98
DJ
3222 else if (regset->type == GENERAL_REGS)
3223 saw_general_regs = 1;
442ea881 3224 regset->store_function (regcache, buf);
58caa3dc 3225 regset ++;
fdeb2a12 3226 free (buf);
58caa3dc 3227 }
e9d25b98
DJ
3228 if (saw_general_regs)
3229 return 0;
3230 else
3231 return 1;
58caa3dc
DJ
3232}
3233
3234static int
442ea881 3235regsets_store_inferior_registers (struct regcache *regcache)
58caa3dc
DJ
3236{
3237 struct regset_info *regset;
e9d25b98 3238 int saw_general_regs = 0;
95954743 3239 int pid;
1570b33e 3240 struct iovec iov;
58caa3dc
DJ
3241
3242 regset = target_regsets;
3243
95954743 3244 pid = lwpid_of (get_thread_lwp (current_inferior));
58caa3dc
DJ
3245 while (regset->size >= 0)
3246 {
1570b33e
L
3247 void *buf, *data;
3248 int nt_type, res;
58caa3dc 3249
52fa2412 3250 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
58caa3dc
DJ
3251 {
3252 regset ++;
3253 continue;
3254 }
3255
bca929d3 3256 buf = xmalloc (regset->size);
545587ee
DJ
3257
3258 /* First fill the buffer with the current register set contents,
3259 in case there are any items in the kernel's regset that are
3260 not in gdbserver's regcache. */
1570b33e
L
3261
3262 nt_type = regset->nt_type;
3263 if (nt_type)
3264 {
3265 iov.iov_base = buf;
3266 iov.iov_len = regset->size;
3267 data = (void *) &iov;
3268 }
3269 else
3270 data = buf;
3271
dfb64f85 3272#ifndef __sparc__
1570b33e 3273 res = ptrace (regset->get_request, pid, nt_type, data);
dfb64f85 3274#else
1570b33e 3275 res = ptrace (regset->get_request, pid, &iov, data);
dfb64f85 3276#endif
545587ee
DJ
3277
3278 if (res == 0)
3279 {
3280 /* Then overlay our cached registers on that. */
442ea881 3281 regset->fill_function (regcache, buf);
545587ee
DJ
3282
3283 /* Only now do we write the register set. */
dfb64f85 3284#ifndef __sparc__
1570b33e 3285 res = ptrace (regset->set_request, pid, nt_type, data);
dfb64f85 3286#else
1570b33e 3287 res = ptrace (regset->set_request, pid, data, nt_type);
dfb64f85 3288#endif
545587ee
DJ
3289 }
3290
58caa3dc
DJ
3291 if (res < 0)
3292 {
3293 if (errno == EIO)
3294 {
52fa2412
UW
3295 /* If we get EIO on a regset, do not try it again for
3296 this process. */
3297 disabled_regsets[regset - target_regsets] = 1;
fdeb2a12 3298 free (buf);
52fa2412 3299 continue;
58caa3dc 3300 }
3221518c
UW
3301 else if (errno == ESRCH)
3302 {
1b3f6016
PA
3303 /* At this point, ESRCH should mean the process is
3304 already gone, in which case we simply ignore attempts
3305 to change its registers. See also the related
3306 comment in linux_resume_one_lwp. */
fdeb2a12 3307 free (buf);
3221518c
UW
3308 return 0;
3309 }
58caa3dc
DJ
3310 else
3311 {
ce3a066d 3312 perror ("Warning: ptrace(regsets_store_inferior_registers)");
58caa3dc
DJ
3313 }
3314 }
e9d25b98
DJ
3315 else if (regset->type == GENERAL_REGS)
3316 saw_general_regs = 1;
58caa3dc 3317 regset ++;
09ec9b38 3318 free (buf);
58caa3dc 3319 }
e9d25b98
DJ
3320 if (saw_general_regs)
3321 return 0;
3322 else
3323 return 1;
ce3a066d 3324 return 0;
58caa3dc
DJ
3325}
3326
3327#endif /* HAVE_LINUX_REGSETS */
3328
3329
/* Fetch registers into REGCACHE, preferring the regset interface and
   falling back to USER-area reads where configured.  REGNO of -1
   means all registers.  */

void
linux_fetch_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  /* A zero return means the general registers were covered.  */
  if (regsets_fetch_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_fetch_inferior_registers (regcache, regno);
#endif
}
3341
/* Write registers from REGCACHE to the inferior, preferring the
   regset interface and falling back to USER-area writes where
   configured.  REGNO of -1 means all registers.  */

void
linux_store_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  /* A zero return means the general registers were covered.  */
  if (regsets_store_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_store_inferior_registers (regcache, regno);
#endif
}
3353
da6d8c04 3354
da6d8c04
DJ
3355/* Copy LEN bytes from inferior's memory starting at MEMADDR
3356 to debugger memory starting at MYADDR. */
3357
c3e735a6 3358static int
f450004a 3359linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
da6d8c04
DJ
3360{
3361 register int i;
3362 /* Round starting address down to longword boundary. */
3363 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3364 /* Round ending address up; get number of longwords that makes. */
aa691b87
RM
3365 register int count
3366 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
da6d8c04
DJ
3367 / sizeof (PTRACE_XFER_TYPE);
3368 /* Allocate buffer of that many longwords. */
aa691b87 3369 register PTRACE_XFER_TYPE *buffer
da6d8c04 3370 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
fd462a61
DJ
3371 int fd;
3372 char filename[64];
95954743 3373 int pid = lwpid_of (get_thread_lwp (current_inferior));
fd462a61
DJ
3374
3375 /* Try using /proc. Don't bother for one word. */
3376 if (len >= 3 * sizeof (long))
3377 {
3378 /* We could keep this file open and cache it - possibly one per
3379 thread. That requires some juggling, but is even faster. */
95954743 3380 sprintf (filename, "/proc/%d/mem", pid);
fd462a61
DJ
3381 fd = open (filename, O_RDONLY | O_LARGEFILE);
3382 if (fd == -1)
3383 goto no_proc;
3384
3385 /* If pread64 is available, use it. It's faster if the kernel
3386 supports it (only one syscall), and it's 64-bit safe even on
3387 32-bit platforms (for instance, SPARC debugging a SPARC64
3388 application). */
3389#ifdef HAVE_PREAD64
3390 if (pread64 (fd, myaddr, len, memaddr) != len)
3391#else
1de1badb 3392 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
fd462a61
DJ
3393#endif
3394 {
3395 close (fd);
3396 goto no_proc;
3397 }
3398
3399 close (fd);
3400 return 0;
3401 }
da6d8c04 3402
fd462a61 3403 no_proc:
da6d8c04
DJ
3404 /* Read all the longwords */
3405 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3406 {
c3e735a6 3407 errno = 0;
14ce3065
DE
3408 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3409 about coercing an 8 byte integer to a 4 byte pointer. */
3410 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
3411 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
c3e735a6
DJ
3412 if (errno)
3413 return errno;
da6d8c04
DJ
3414 }
3415
3416 /* Copy appropriate bytes out of the buffer. */
1b3f6016
PA
3417 memcpy (myaddr,
3418 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
3419 len);
c3e735a6
DJ
3420
3421 return 0;
da6d8c04
DJ
3422}
3423
93ae6fdc
PA
3424/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
3425 memory at MEMADDR. On failure (cannot write to the inferior)
da6d8c04
DJ
3426 returns the value of errno. */
3427
ce3a066d 3428static int
f450004a 3429linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
da6d8c04
DJ
3430{
3431 register int i;
3432 /* Round starting address down to longword boundary. */
3433 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3434 /* Round ending address up; get number of longwords that makes. */
3435 register int count
3436 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
3437 /* Allocate buffer of that many longwords. */
3438 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
95954743 3439 int pid = lwpid_of (get_thread_lwp (current_inferior));
da6d8c04 3440
0d62e5e8
DJ
3441 if (debug_threads)
3442 {
58d6951d
DJ
3443 /* Dump up to four bytes. */
3444 unsigned int val = * (unsigned int *) myaddr;
3445 if (len == 1)
3446 val = val & 0xff;
3447 else if (len == 2)
3448 val = val & 0xffff;
3449 else if (len == 3)
3450 val = val & 0xffffff;
3451 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
3452 val, (long)memaddr);
0d62e5e8
DJ
3453 }
3454
da6d8c04
DJ
3455 /* Fill start and end extra bytes of buffer with existing memory data. */
3456
93ae6fdc 3457 errno = 0;
14ce3065
DE
3458 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3459 about coercing an 8 byte integer to a 4 byte pointer. */
3460 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
3461 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
93ae6fdc
PA
3462 if (errno)
3463 return errno;
da6d8c04
DJ
3464
3465 if (count > 1)
3466 {
93ae6fdc 3467 errno = 0;
da6d8c04 3468 buffer[count - 1]
95954743 3469 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
3470 /* Coerce to a uintptr_t first to avoid potential gcc warning
3471 about coercing an 8 byte integer to a 4 byte pointer. */
3472 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
3473 * sizeof (PTRACE_XFER_TYPE)),
d844cde6 3474 0);
93ae6fdc
PA
3475 if (errno)
3476 return errno;
da6d8c04
DJ
3477 }
3478
93ae6fdc 3479 /* Copy data to be written over corresponding part of buffer. */
da6d8c04
DJ
3480
3481 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
3482
3483 /* Write the entire buffer. */
3484
3485 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3486 {
3487 errno = 0;
14ce3065
DE
3488 ptrace (PTRACE_POKETEXT, pid,
3489 /* Coerce to a uintptr_t first to avoid potential gcc warning
3490 about coercing an 8 byte integer to a 4 byte pointer. */
3491 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
3492 (PTRACE_ARG4_TYPE) buffer[i]);
da6d8c04
DJ
3493 if (errno)
3494 return errno;
3495 }
3496
3497 return 0;
3498}
2f2893d9 3499
6076632b 3500/* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
24a09b5f
DJ
3501static int linux_supports_tracefork_flag;
3502
1e7fc18c
PA
3503static void
3504linux_enable_event_reporting (int pid)
3505{
3506 if (!linux_supports_tracefork_flag)
3507 return;
3508
3509 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
3510}
3511
/* Helper functions for linux_test_for_tracefork, called via clone ().
   The grandchild's only job is to exit immediately so the parent can
   observe the fork event.  */

static int
linux_tracefork_grandchild (void *arg)
{
  _exit (0);
}
3519
7407e2de
AS
3520#define STACK_SIZE 4096
3521
51c2684e
DJ
/* Helper for linux_test_for_tracefork: the child arranges to be
   traced, stops itself with SIGSTOP, then forks (or clones, on no-MMU
   uClibc systems) a grandchild so the parent can check whether a
   PTRACE_EVENT_FORK is reported.  */

static int
linux_tracefork_child (void *arg)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);

#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  if (fork () == 0)
    linux_tracefork_grandchild (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

#ifdef __ia64__
  __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
	    CLONE_VM | SIGCHLD, NULL);
#else
  clone (linux_tracefork_grandchild, arg + STACK_SIZE,
	 CLONE_VM | SIGCHLD, NULL);
#endif

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  _exit (0);
}
3547
24a09b5f
DJ
3548/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
3549 sure that we can enable the option, and that it had the desired
3550 effect. */
3551
3552static void
3553linux_test_for_tracefork (void)
3554{
3555 int child_pid, ret, status;
3556 long second_pid;
e4b7f41c 3557#if defined(__UCLIBC__) && defined(HAS_NOMMU)
bca929d3 3558 char *stack = xmalloc (STACK_SIZE * 4);
e4b7f41c 3559#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
24a09b5f
DJ
3560
3561 linux_supports_tracefork_flag = 0;
3562
e4b7f41c
JK
3563#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3564
3565 child_pid = fork ();
3566 if (child_pid == 0)
3567 linux_tracefork_child (NULL);
3568
3569#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3570
51c2684e 3571 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
7407e2de
AS
3572#ifdef __ia64__
3573 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
3574 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
e4b7f41c 3575#else /* !__ia64__ */
7407e2de
AS
3576 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
3577 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
e4b7f41c
JK
3578#endif /* !__ia64__ */
3579
3580#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3581
24a09b5f 3582 if (child_pid == -1)
51c2684e 3583 perror_with_name ("clone");
24a09b5f
DJ
3584
3585 ret = my_waitpid (child_pid, &status, 0);
3586 if (ret == -1)
3587 perror_with_name ("waitpid");
3588 else if (ret != child_pid)
3589 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
3590 if (! WIFSTOPPED (status))
3591 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
3592
14ce3065
DE
3593 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
3594 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
24a09b5f
DJ
3595 if (ret != 0)
3596 {
3597 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3598 if (ret != 0)
3599 {
3600 warning ("linux_test_for_tracefork: failed to kill child");
3601 return;
3602 }
3603
3604 ret = my_waitpid (child_pid, &status, 0);
3605 if (ret != child_pid)
3606 warning ("linux_test_for_tracefork: failed to wait for killed child");
3607 else if (!WIFSIGNALED (status))
3608 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
3609 "killed child", status);
3610
3611 return;
3612 }
3613
3614 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
3615 if (ret != 0)
3616 warning ("linux_test_for_tracefork: failed to resume child");
3617
3618 ret = my_waitpid (child_pid, &status, 0);
3619
3620 if (ret == child_pid && WIFSTOPPED (status)
3621 && status >> 16 == PTRACE_EVENT_FORK)
3622 {
3623 second_pid = 0;
3624 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
3625 if (ret == 0 && second_pid != 0)
3626 {
3627 int second_status;
3628
3629 linux_supports_tracefork_flag = 1;
3630 my_waitpid (second_pid, &second_status, 0);
3631 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
3632 if (ret != 0)
3633 warning ("linux_test_for_tracefork: failed to kill second child");
3634 my_waitpid (second_pid, &status, 0);
3635 }
3636 }
3637 else
3638 warning ("linux_test_for_tracefork: unexpected result from waitpid "
3639 "(%d, status 0x%x)", ret, status);
3640
3641 do
3642 {
3643 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3644 if (ret != 0)
3645 warning ("linux_test_for_tracefork: failed to kill child");
3646 my_waitpid (child_pid, &status, 0);
3647 }
3648 while (WIFSTOPPED (status));
51c2684e 3649
e4b7f41c 3650#if defined(__UCLIBC__) && defined(HAS_NOMMU)
51c2684e 3651 free (stack);
e4b7f41c 3652#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
24a09b5f
DJ
3653}
3654
3655
2f2893d9
DJ
/* Hook up libthread_db for the current process, unless it is already
   active.  */

static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  if (proc->private->thread_db != NULL)
    return;

  /* If the kernel supports tracing forks then it also supports tracing
     clones, and then we don't need to use the magic thread event breakpoint
     to learn about threads.  */
  thread_db_init (!linux_supports_tracefork_flag);
#endif
}
3671
e5379b03 3672static void
ef57601b 3673linux_request_interrupt (void)
e5379b03 3674{
a1928bad 3675 extern unsigned long signal_pid;
e5379b03 3676
95954743
PA
3677 if (!ptid_equal (cont_thread, null_ptid)
3678 && !ptid_equal (cont_thread, minus_one_ptid))
e5379b03 3679 {
54a0b537 3680 struct lwp_info *lwp;
bd99dc85 3681 int lwpid;
e5379b03 3682
54a0b537 3683 lwp = get_thread_lwp (current_inferior);
bd99dc85
PA
3684 lwpid = lwpid_of (lwp);
3685 kill_lwp (lwpid, SIGINT);
e5379b03
DJ
3686 }
3687 else
ef57601b 3688 kill_lwp (signal_pid, SIGINT);
e5379b03
DJ
3689}
3690
aa691b87
RM
3691/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
3692 to debugger memory starting at MYADDR. */
3693
3694static int
f450004a 3695linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
aa691b87
RM
3696{
3697 char filename[PATH_MAX];
3698 int fd, n;
95954743 3699 int pid = lwpid_of (get_thread_lwp (current_inferior));
aa691b87 3700
95954743 3701 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
3702
3703 fd = open (filename, O_RDONLY);
3704 if (fd < 0)
3705 return -1;
3706
3707 if (offset != (CORE_ADDR) 0
3708 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
3709 n = -1;
3710 else
3711 n = read (fd, myaddr, len);
3712
3713 close (fd);
3714
3715 return n;
3716}
3717
d993e290
PA
3718/* These breakpoint and watchpoint related wrapper functions simply
3719 pass on the function call if the target has registered a
3720 corresponding function. */
e013ee27
OF
3721
3722static int
d993e290 3723linux_insert_point (char type, CORE_ADDR addr, int len)
e013ee27 3724{
d993e290
PA
3725 if (the_low_target.insert_point != NULL)
3726 return the_low_target.insert_point (type, addr, len);
e013ee27
OF
3727 else
3728 /* Unsupported (see target.h). */
3729 return 1;
3730}
3731
3732static int
d993e290 3733linux_remove_point (char type, CORE_ADDR addr, int len)
e013ee27 3734{
d993e290
PA
3735 if (the_low_target.remove_point != NULL)
3736 return the_low_target.remove_point (type, addr, len);
e013ee27
OF
3737 else
3738 /* Unsupported (see target.h). */
3739 return 1;
3740}
3741
3742static int
3743linux_stopped_by_watchpoint (void)
3744{
c3adc08c
PA
3745 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3746
3747 return lwp->stopped_by_watchpoint;
e013ee27
OF
3748}
3749
3750static CORE_ADDR
3751linux_stopped_data_address (void)
3752{
c3adc08c
PA
3753 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3754
3755 return lwp->stopped_data_address;
e013ee27
OF
3756}
3757
42c81e2a 3758#if defined(__UCLIBC__) && defined(HAS_NOMMU)
52fb6437
NS
3759#if defined(__mcoldfire__)
3760/* These should really be defined in the kernel's ptrace.h header. */
3761#define PT_TEXT_ADDR 49*4
3762#define PT_DATA_ADDR 50*4
3763#define PT_TEXT_END_ADDR 51*4
3764#endif
3765
3766/* Under uClinux, programs are loaded at non-zero offsets, which we need
3767 to tell gdb about. */
3768
3769static int
3770linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
3771{
3772#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
3773 unsigned long text, text_end, data;
bd99dc85 3774 int pid = lwpid_of (get_thread_lwp (current_inferior));
52fb6437
NS
3775
3776 errno = 0;
3777
3778 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
3779 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
3780 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
3781
3782 if (errno == 0)
3783 {
3784 /* Both text and data offsets produced at compile-time (and so
1b3f6016
PA
3785 used by gdb) are relative to the beginning of the program,
3786 with the data segment immediately following the text segment.
3787 However, the actual runtime layout in memory may put the data
3788 somewhere else, so when we send gdb a data base-address, we
3789 use the real data base address and subtract the compile-time
3790 data base-address from it (which is just the length of the
3791 text segment). BSS immediately follows data in both
3792 cases. */
52fb6437
NS
3793 *text_p = text;
3794 *data_p = data - (text_end - text);
1b3f6016 3795
52fb6437
NS
3796 return 1;
3797 }
3798#endif
3799 return 0;
3800}
3801#endif
3802
dc146f7c
VP
/* qsort comparison function for ints.  Returns a negative, zero, or
   positive value as A is less than, equal to, or greater than B.
   Uses (a > b) - (a < b) rather than the classic "a - b", which
   overflows (undefined behavior) for widely separated values such as
   INT_MIN vs. a positive number.  */

static int
compare_ints (const void *xa, const void *xb)
{
  int a = *(const int *) xa;
  int b = *(const int *) xb;

  return (a > b) - (a < b);
}
3811
/* Collapse adjacent duplicates in the sorted range [FIRST, LAST),
   in place, and return one past the last kept element.  The range is
   assumed non-empty (callers only use it with count > 0).  */

static int *
unique (int *first, int *last)
{
  int *out = first;
  int *in;

  for (in = first + 1; in != last; in++)
    if (*out != *in)
      *++out = *in;

  return out + 1;
}
3821
3822/* Given PID, iterates over all threads in that process.
3823
3824 Information about each thread, in a format suitable for qXfer:osdata:thread
3825 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
3826 initialized, and the caller is responsible for finishing and appending '\0'
3827 to it.
3828
3829 The list of cores that threads are running on is assigned to *CORES, if it
3830 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
3831 should free *CORES. */
3832
3833static void
3834list_threads (int pid, struct buffer *buffer, char **cores)
3835{
3836 int count = 0;
3837 int allocated = 10;
3838 int *core_numbers = xmalloc (sizeof (int) * allocated);
3839 char pathname[128];
3840 DIR *dir;
3841 struct dirent *dp;
3842 struct stat statbuf;
3843
3844 sprintf (pathname, "/proc/%d/task", pid);
3845 if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
3846 {
3847 dir = opendir (pathname);
3848 if (!dir)
3849 {
3850 free (core_numbers);
3851 return;
3852 }
3853
3854 while ((dp = readdir (dir)) != NULL)
3855 {
3856 unsigned long lwp = strtoul (dp->d_name, NULL, 10);
3857
3858 if (lwp != 0)
3859 {
3860 unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));
3861
3862 if (core != -1)
3863 {
3864 char s[sizeof ("4294967295")];
3865 sprintf (s, "%u", core);
3866
3867 if (count == allocated)
3868 {
3869 allocated *= 2;
3870 core_numbers = realloc (core_numbers,
3871 sizeof (int) * allocated);
3872 }
3873 core_numbers[count++] = core;
3874 if (buffer)
3875 buffer_xml_printf (buffer,
3876 "<item>"
3877 "<column name=\"pid\">%d</column>"
3878 "<column name=\"tid\">%s</column>"
3879 "<column name=\"core\">%s</column>"
3880 "</item>", pid, dp->d_name, s);
3881 }
3882 else
3883 {
3884 if (buffer)
3885 buffer_xml_printf (buffer,
3886 "<item>"
3887 "<column name=\"pid\">%d</column>"
3888 "<column name=\"tid\">%s</column>"
3889 "</item>", pid, dp->d_name);
3890 }
3891 }
3892 }
3893 }
3894
3895 if (cores)
3896 {
3897 *cores = NULL;
3898 if (count > 0)
3899 {
3900 struct buffer buffer2;
3901 int *b;
3902 int *e;
3903 qsort (core_numbers, count, sizeof (int), compare_ints);
3904
3905 /* Remove duplicates. */
3906 b = core_numbers;
3907 e = unique (b, core_numbers + count);
3908
3909 buffer_init (&buffer2);
3910
3911 for (b = core_numbers; b != e; ++b)
3912 {
3913 char number[sizeof ("4294967295")];
3914 sprintf (number, "%u", *b);
3915 buffer_xml_printf (&buffer2, "%s%s",
3916 (b == core_numbers) ? "" : ",", number);
3917 }
3918 buffer_grow_str0 (&buffer2, "");
3919
3920 *cores = buffer_finish (&buffer2);
3921 }
3922 }
3923 free (core_numbers);
3924}
3925
/* Append one qXfer:osdata:processes <item> describing process PID to
   BUFFER.  USERNAME is the process owner's name.  The command line is
   read from /proc/PID/cmdline; processes whose cmdline can't be read
   (or is empty, e.g. kernel threads) produce no output.  */

static void
show_process (int pid, const char *username, struct buffer *buffer)
{
  char procfile[128];
  FILE *fp;
  char cmdline[MAXPATHLEN + 1];

  sprintf (procfile, "/proc/%d/cmdline", pid);

  fp = fopen (procfile, "r");
  if (fp == NULL)
    return;

  {
    size_t nread = fread (cmdline, 1, sizeof (cmdline) - 1, fp);

    if (nread > 0)
      {
	char *cores = NULL;
	size_t pos;

	/* The kernel separates argv entries with NUL bytes; replace
	   them with spaces so the whole command line reads as one
	   string.  */
	for (pos = 0; pos < nread; pos++)
	  if (cmdline[pos] == '\0')
	    cmdline[pos] = ' ';
	cmdline[nread] = '\0';

	buffer_xml_printf (buffer,
			   "<item>"
			   "<column name=\"pid\">%d</column>"
			   "<column name=\"user\">%s</column>"
			   "<column name=\"command\">%s</column>",
			   pid,
			   username,
			   cmdline);

	/* This only collects core numbers, and does not print threads. */
	list_threads (pid, NULL, &cores);

	if (cores)
	  {
	    buffer_xml_printf (buffer,
			       "<column name=\"cores\">%s</column>", cores);
	    free (cores);
	  }

	buffer_xml_printf (buffer, "</item>");
      }
    fclose (fp);
  }
}
3971
/* Handle qXfer:osdata:read.  ANNEX selects the table: "processes" or
   "threads"; any other annex is unsupported (returns 0).  The object
   is read-only.  Returns the number of bytes copied into READBUF, 0
   at end-of-object, and relies on the caller issuing a final read
   past the end to release the snapshot.

   NOTE(review): the static BUF/LEN_AVAIL/BUFFER state means only one
   osdata request may be in flight at a time — presumably guaranteed
   by the single-threaded request loop; confirm against server.c.  */

static int
linux_qxfer_osdata (const char *annex,
		    unsigned char *readbuf, unsigned const char *writebuf,
		    CORE_ADDR offset, int len)
{
  /* We make the process list snapshot when the object starts to be
     read.  */
  static const char *buf;
  static long len_avail = -1;
  static struct buffer buffer;
  int processes = 0;
  int threads = 0;

  DIR *dirp;

  if (strcmp (annex, "processes") == 0)
    processes = 1;
  else if (strcmp (annex, "threads") == 0)
    threads = 1;
  else
    return 0;

  /* Writes are not supported; a NULL READBUF means a write request.  */
  if (!readbuf || writebuf)
    return 0;

  if (offset == 0)
    {
      /* Offset 0 starts a new request: discard any previous snapshot
	 and rebuild it by scanning /proc.  */
      if (len_avail != -1 && len_avail != 0)
	buffer_free (&buffer);
      len_avail = 0;
      buf = NULL;
      buffer_init (&buffer);
      if (processes)
	buffer_grow_str (&buffer, "<osdata type=\"processes\">");
      else if (threads)
	buffer_grow_str (&buffer, "<osdata type=\"threads\">");

      dirp = opendir ("/proc");
      if (dirp)
	{
	  struct dirent *dp;
	  while ((dp = readdir (dirp)) != NULL)
	    {
	      struct stat statbuf;
	      char procentry[sizeof ("/proc/4294967295")];

	      /* Only numeric directory names are PIDs; the length
		 check keeps the sprintf below in bounds.  */
	      if (!isdigit (dp->d_name[0])
		  || strlen (dp->d_name) > sizeof ("4294967295") - 1)
		continue;

	      sprintf (procentry, "/proc/%s", dp->d_name);
	      if (stat (procentry, &statbuf) == 0
		  && S_ISDIR (statbuf.st_mode))
		{
		  int pid = (int) strtoul (dp->d_name, NULL, 10);

		  if (processes)
		    {
		      /* Map the directory's owner uid to a user name;
			 "?" when there is no passwd entry.  */
		      struct passwd *entry = getpwuid (statbuf.st_uid);
		      show_process (pid, entry ? entry->pw_name : "?", &buffer);
		    }
		  else if (threads)
		    {
		      list_threads (pid, &buffer, NULL);
		    }
		}
	    }

	  closedir (dirp);
	}
      buffer_grow_str0 (&buffer, "</osdata>\n");
      buf = buffer_finish (&buffer);
      len_avail = strlen (buf);
    }

  if (offset >= len_avail)
    {
      /* Done.  Get rid of the data.  */
      buffer_free (&buffer);
      buf = NULL;
      len_avail = 0;
      return 0;
    }

  /* Serve the [offset, offset+len) window of the snapshot.  */
  if (len > len_avail - offset)
    len = len_avail - offset;
  memcpy (readbuf, buf + offset, len);

  return len;
}
4062
d0722149
DE
4063/* Convert a native/host siginfo object, into/from the siginfo in the
4064 layout of the inferiors' architecture. */
4065
4066static void
4067siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
4068{
4069 int done = 0;
4070
4071 if (the_low_target.siginfo_fixup != NULL)
4072 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4073
4074 /* If there was no callback, or the callback didn't do anything,
4075 then just do a straight memcpy. */
4076 if (!done)
4077 {
4078 if (direction == 1)
4079 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4080 else
4081 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4082 }
4083}
4084
/* Handle qXfer:siginfo reads and writes for the current inferior LWP.
   Exactly one of READBUF/WRITEBUF is non-NULL; OFFSET/LEN select a
   window into the (architecture-layout) siginfo object.  Returns the
   number of bytes transferred, or -1 on error.  */

static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  struct siginfo siginfo;
  /* Byte image of the siginfo in the *inferior's* layout, which may
     differ from the host layout (see siginfo_fixup).  */
  char inf_siginfo[sizeof (struct siginfo)];

  if (current_inferior == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));

  if (debug_threads)
    fprintf (stderr, "%s siginfo for lwp %d.\n",
	     readbuf != NULL ? "Reading" : "Writing",
	     pid);

  /* Window entirely past the object.  (OFFSET == sizeof falls through
     and yields a 0-length transfer below.)  */
  if (offset > sizeof (siginfo))
    return -1;

  /* Fetch the current siginfo even for writes, so a partial write
     only patches the bytes in [OFFSET, OFFSET+LEN).  */
  if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the window to the object's size.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
4133
bd99dc85
PA
4134/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4135 so we notice when children change state; as the handler for the
4136 sigsuspend in my_waitpid. */
4137
4138static void
4139sigchld_handler (int signo)
4140{
4141 int old_errno = errno;
4142
4143 if (debug_threads)
4144 /* fprintf is not async-signal-safe, so call write directly. */
4145 write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);
4146
4147 if (target_is_async_p ())
4148 async_file_mark (); /* trigger a linux_wait */
4149
4150 errno = old_errno;
4151}
4152
/* Target hook: non-stop mode is unconditionally available on
   GNU/Linux.  */

static int
linux_supports_non_stop (void)
{
  return 1;
}
4158
/* Enable (ENABLE != 0) or disable async mode, by registering or
   unregistering the event pipe with the event loop.  Returns the
   previous state.  A pipe end of -1 is the "disabled" sentinel.  */

static int
linux_async (int enable)
{
  int previous = (linux_event_pipe[0] != -1);

  if (debug_threads)
    fprintf (stderr, "linux_async (%d), previous=%d\n",
	     enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      /* Block SIGCHLD for the duration of the switch so the handler
	 never observes a half-created or half-destroyed pipe.  */
      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    fatal ("creating event pipe failed.");

	  /* Non-blocking, so async_file_mark in the signal handler
	     can never wedge on a full pipe.  */
	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
4206
/* Target hook: enter or leave non-stop mode.  On Linux this just
   means (un)registering with the event loop.  Always succeeds.  */

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly. */
  linux_async (nonstop);

  return 0;
}
4214
/* Target hook: debugging multiple processes at once is always
   supported on GNU/Linux.  */

static int
linux_supports_multi_process (void)
{
  return 1;
}
4220
efcbbd14
UW
4221
4222/* Enumerate spufs IDs for process PID. */
4223static int
4224spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4225{
4226 int pos = 0;
4227 int written = 0;
4228 char path[128];
4229 DIR *dir;
4230 struct dirent *entry;
4231
4232 sprintf (path, "/proc/%ld/fd", pid);
4233 dir = opendir (path);
4234 if (!dir)
4235 return -1;
4236
4237 rewinddir (dir);
4238 while ((entry = readdir (dir)) != NULL)
4239 {
4240 struct stat st;
4241 struct statfs stfs;
4242 int fd;
4243
4244 fd = atoi (entry->d_name);
4245 if (!fd)
4246 continue;
4247
4248 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4249 if (stat (path, &st) != 0)
4250 continue;
4251 if (!S_ISDIR (st.st_mode))
4252 continue;
4253
4254 if (statfs (path, &stfs) != 0)
4255 continue;
4256 if (stfs.f_type != SPUFS_MAGIC)
4257 continue;
4258
4259 if (pos >= offset && pos + 4 <= offset + len)
4260 {
4261 *(unsigned int *)(buf + pos - offset) = fd;
4262 written += 4;
4263 }
4264 pos += 4;
4265 }
4266
4267 closedir (dir);
4268 return written;
4269}
4270
4271/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4272 object type, using the /proc file system. */
4273static int
4274linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4275 unsigned const char *writebuf,
4276 CORE_ADDR offset, int len)
4277{
4278 long pid = lwpid_of (get_thread_lwp (current_inferior));
4279 char buf[128];
4280 int fd = 0;
4281 int ret = 0;
4282
4283 if (!writebuf && !readbuf)
4284 return -1;
4285
4286 if (!*annex)
4287 {
4288 if (!readbuf)
4289 return -1;
4290 else
4291 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4292 }
4293
4294 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4295 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4296 if (fd <= 0)
4297 return -1;
4298
4299 if (offset != 0
4300 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4301 {
4302 close (fd);
4303 return 0;
4304 }
4305
4306 if (writebuf)
4307 ret = write (fd, writebuf, (size_t) len);
4308 else
4309 ret = read (fd, readbuf, (size_t) len);
4310
4311 close (fd);
4312 return ret;
4313}
4314
dc146f7c
VP
4315static int
4316linux_core_of_thread (ptid_t ptid)
4317{
4318 char filename[sizeof ("/proc//task//stat")
4319 + 2 * 20 /* decimal digits for 2 numbers, max 2^64 bit each */
4320 + 1];
4321 FILE *f;
4322 char *content = NULL;
4323 char *p;
4324 char *ts = 0;
4325 int content_read = 0;
4326 int i;
4327 int core;
4328
4329 sprintf (filename, "/proc/%d/task/%ld/stat",
4330 ptid_get_pid (ptid), ptid_get_lwp (ptid));
4331 f = fopen (filename, "r");
4332 if (!f)
4333 return -1;
4334
4335 for (;;)
4336 {
4337 int n;
4338 content = realloc (content, content_read + 1024);
4339 n = fread (content + content_read, 1, 1024, f);
4340 content_read += n;
4341 if (n < 1024)
4342 {
4343 content[content_read] = '\0';
4344 break;
4345 }
4346 }
4347
4348 p = strchr (content, '(');
dc146f7c 4349
ca2a87a0
JK
4350 /* Skip ")". */
4351 if (p != NULL)
4352 p = strchr (p, ')');
4353 if (p != NULL)
4354 p++;
4355
4356 /* If the first field after program name has index 0, then core number is
4357 the field with index 36. There's no constant for that anywhere. */
4358 if (p != NULL)
4359 p = strtok_r (p, " ", &ts);
4360 for (i = 0; p != NULL && i != 36; ++i)
dc146f7c
VP
4361 p = strtok_r (NULL, " ", &ts);
4362
ca2a87a0 4363 if (p == NULL || sscanf (p, "%d", &core) == 0)
dc146f7c
VP
4364 core = -1;
4365
4366 free (content);
4367 fclose (f);
4368
4369 return core;
4370}
4371
1570b33e
L
4372static void
4373linux_process_qsupported (const char *query)
4374{
4375 if (the_low_target.process_qsupported != NULL)
4376 the_low_target.process_qsupported (query);
4377}
4378
219f2f23
PA
4379static int
4380linux_supports_tracepoints (void)
4381{
4382 if (*the_low_target.supports_tracepoints == NULL)
4383 return 0;
4384
4385 return (*the_low_target.supports_tracepoints) ();
4386}
4387
4388static CORE_ADDR
4389linux_read_pc (struct regcache *regcache)
4390{
4391 if (the_low_target.get_pc == NULL)
4392 return 0;
4393
4394 return (*the_low_target.get_pc) (regcache);
4395}
4396
4397static void
4398linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
4399{
4400 gdb_assert (the_low_target.set_pc != NULL);
4401
4402 (*the_low_target.set_pc) (regcache, pc);
4403}
4404
8336d594
PA
4405static int
4406linux_thread_stopped (struct thread_info *thread)
4407{
4408 return get_thread_lwp (thread)->stopped;
4409}
4410
4411/* This exposes stop-all-threads functionality to other modules. */
4412
4413static void
7984d532 4414linux_pause_all (int freeze)
8336d594 4415{
7984d532
PA
4416 stop_all_lwps (freeze, NULL);
4417}
4418
4419/* This exposes unstop-all-threads functionality to other gdbserver
4420 modules. */
4421
4422static void
4423linux_unpause_all (int unfreeze)
4424{
4425 unstop_all_lwps (unfreeze, NULL);
8336d594
PA
4426}
4427
/* The Linux implementation of the target_ops vector.  The initializer
   is positional, so the order here must exactly match the member
   order of struct target_ops in target.h -- do not reorder.  */

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
  /* read_offsets is only meaningful on uClinux no-MMU targets, where
     text/data load addresses differ from link-time addresses.  */
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  linux_read_offsets,
#else
  NULL,
#endif
  /* TLS address lookup requires libthread_db support.  */
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_core_of_thread,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  /* NOTE(review): this NULL slot sits between thread_stopped and
     pause_all; presumably a hook Linux does not implement (e.g. the
     Windows-only TIB lookup) -- confirm against target.h.  */
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_cancel_breakpoints
};
4483
0d62e5e8
DJ
4484static void
4485linux_init_signals ()
4486{
4487 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
4488 to find what the cancel signal actually is. */
60c3d7b0 4489#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
254787d4 4490 signal (__SIGRTMIN+1, SIG_IGN);
60c3d7b0 4491#endif
0d62e5e8
DJ
4492}
4493
/* One-time initialization of the Linux low target: install the
   target vector, publish the breakpoint instruction, set up signal
   handling, probe ptrace fork-tracing support, and size the regset
   bookkeeping.  Called once at gdbserver startup.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  /* Tell the breakpoint module what instruction this architecture
     uses as a software breakpoint.  */
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  /* Probe which PTRACE_O_TRACEFORK-style options the running kernel
     supports.  */
  linux_test_for_tracefork ();
#ifdef HAVE_LINUX_REGSETS
  /* Count the target's regsets (terminated by a negative size) and
     allocate one "disabled" flag per regset.  */
  for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
    ;
  disabled_regsets = xmalloc (num_regsets);
#endif

  /* Install the SIGCHLD handler; SA_RESTART so interrupted syscalls
     elsewhere are resumed automatically.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);
}