]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/gdbserver/linux-low.c
*** empty log message ***
[thirdparty/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
545587ee 2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
4c38e0a4 3 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
da6d8c04
DJ
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
a9762ec7 9 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
a9762ec7 18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
19
20#include "server.h"
58caa3dc 21#include "linux-low.h"
da6d8c04 22
58caa3dc 23#include <sys/wait.h>
da6d8c04
DJ
24#include <stdio.h>
25#include <sys/param.h>
da6d8c04 26#include <sys/ptrace.h>
da6d8c04
DJ
27#include <signal.h>
28#include <sys/ioctl.h>
29#include <fcntl.h>
d07c63e7 30#include <string.h>
0a30fbc4
DJ
31#include <stdlib.h>
32#include <unistd.h>
fa6a77dc 33#include <errno.h>
fd500816 34#include <sys/syscall.h>
f9387fc3 35#include <sched.h>
07e059b5
VP
36#include <ctype.h>
37#include <pwd.h>
38#include <sys/types.h>
39#include <dirent.h>
efcbbd14
UW
40#include <sys/stat.h>
41#include <sys/vfs.h>
957f3f49
DE
42#ifndef ELFMAG0
43/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
44 then ELFMAG0 will have been defined. If it didn't get included by
45 gdb_proc_service.h then including it will likely introduce a duplicate
46 definition of elf_fpregset_t. */
47#include <elf.h>
48#endif
efcbbd14
UW
49
50#ifndef SPUFS_MAGIC
51#define SPUFS_MAGIC 0x23c9b64e
52#endif
da6d8c04 53
32ca6d61
DJ
54#ifndef PTRACE_GETSIGINFO
55# define PTRACE_GETSIGINFO 0x4202
56# define PTRACE_SETSIGINFO 0x4203
57#endif
58
fd462a61
DJ
59#ifndef O_LARGEFILE
60#define O_LARGEFILE 0
61#endif
62
24a09b5f
DJ
63/* If the system headers did not provide the constants, hard-code the normal
64 values. */
65#ifndef PTRACE_EVENT_FORK
66
67#define PTRACE_SETOPTIONS 0x4200
68#define PTRACE_GETEVENTMSG 0x4201
69
70/* options set using PTRACE_SETOPTIONS */
71#define PTRACE_O_TRACESYSGOOD 0x00000001
72#define PTRACE_O_TRACEFORK 0x00000002
73#define PTRACE_O_TRACEVFORK 0x00000004
74#define PTRACE_O_TRACECLONE 0x00000008
75#define PTRACE_O_TRACEEXEC 0x00000010
76#define PTRACE_O_TRACEVFORKDONE 0x00000020
77#define PTRACE_O_TRACEEXIT 0x00000040
78
79/* Wait extended result codes for the above trace options. */
80#define PTRACE_EVENT_FORK 1
81#define PTRACE_EVENT_VFORK 2
82#define PTRACE_EVENT_CLONE 3
83#define PTRACE_EVENT_EXEC 4
84#define PTRACE_EVENT_VFORK_DONE 5
85#define PTRACE_EVENT_EXIT 6
86
87#endif /* PTRACE_EVENT_FORK */
88
89/* We can't always assume that this flag is available, but all systems
90 with the ptrace event handlers also have __WALL, so it's safe to use
91 in some contexts. */
92#ifndef __WALL
93#define __WALL 0x40000000 /* Wait for any child. */
94#endif
95
ec8ebe72
DE
96#ifndef W_STOPCODE
97#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
98#endif
99
42c81e2a
DJ
100#ifdef __UCLIBC__
101#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
102#define HAS_NOMMU
103#endif
104#endif
105
24a09b5f
DJ
106/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
107 representation of the thread ID.
611cb4a5 108
54a0b537 109 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
95954743
PA
110 the same as the LWP ID.
111
112 ``all_processes'' is keyed by the "overall process ID", which
113 GNU/Linux calls tgid, "thread group ID". */
0d62e5e8 114
54a0b537 115struct inferior_list all_lwps;
0d62e5e8 116
24a09b5f
DJ
117/* A list of all unknown processes which receive stop signals. Some other
118 process will presumably claim each of these as forked children
119 momentarily. */
120
121struct inferior_list stopped_pids;
122
0d62e5e8
DJ
123/* FIXME this is a bit of a hack, and could be removed. */
124int stopping_threads;
125
126/* FIXME make into a target method? */
24a09b5f 127int using_threads = 1;
24a09b5f 128
95954743
PA
129/* This flag is true iff we've just created or attached to our first
130 inferior but it has not stopped yet. As soon as it does, we need
131 to call the low target's arch_setup callback. Doing this only on
132 the first inferior avoids reinializing the architecture on every
133 inferior, and avoids messing with the register caches of the
134 already running inferiors. NOTE: this assumes all inferiors under
135 control of gdbserver have the same architecture. */
d61ddec4
UW
136static int new_inferior;
137
2acc282a 138static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 139 int step, int signal, siginfo_t *info);
2bd7c093 140static void linux_resume (struct thread_resume *resume_info, size_t n);
54a0b537 141static void stop_all_lwps (void);
95954743 142static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
95954743 143static void *add_lwp (ptid_t ptid);
c35fafde 144static int linux_stopped_by_watchpoint (void);
95954743 145static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
dc146f7c 146static int linux_core_of_thread (ptid_t ptid);
d50171e4
PA
147static void proceed_all_lwps (void);
148static void unstop_all_lwps (struct lwp_info *except);
d50171e4
PA
149static int finish_step_over (struct lwp_info *lwp);
150static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
151static int kill_lwp (unsigned long lwpid, int signo);
152
153/* True if the low target can hardware single-step. Such targets
154 don't need a BREAKPOINT_REINSERT_ADDR callback. */
155
156static int
157can_hardware_single_step (void)
158{
159 return (the_low_target.breakpoint_reinsert_addr == NULL);
160}
161
162/* True if the low target supports memory breakpoints. If so, we'll
163 have a GET_PC implementation. */
164
165static int
166supports_breakpoints (void)
167{
168 return (the_low_target.get_pc != NULL);
169}
0d62e5e8
DJ
170
/* One signal queued for later delivery to an LWP.  Entries are kept
   as a singly-linked stack through PREV.  */
struct pending_signals
{
  int signal;                    /* Signal number to deliver.  */
  siginfo_t info;                /* Full siginfo saved for re-injection.  */
  struct pending_signals *prev;  /* Next older queued signal.  */
};
611cb4a5 177
14ce3065
DE
178#define PTRACE_ARG3_TYPE void *
179#define PTRACE_ARG4_TYPE void *
c6ecbae5 180#define PTRACE_XFER_TYPE long
da6d8c04 181
58caa3dc 182#ifdef HAVE_LINUX_REGSETS
52fa2412
UW
183static char *disabled_regsets;
184static int num_regsets;
58caa3dc
DJ
185#endif
186
bd99dc85
PA
187/* The read/write ends of the pipe registered as waitable file in the
188 event loop. */
189static int linux_event_pipe[2] = { -1, -1 };
190
191/* True if we're currently in async mode. */
192#define target_is_async_p() (linux_event_pipe[0] != -1)
193
194static void send_sigstop (struct inferior_list_entry *entry);
195static void wait_for_sigstop (struct inferior_list_entry *entry);
196
d0722149
DE
197/* Accepts an integer PID; Returns a string representing a file that
198 can be opened to get info for the child process.
199 Space for the result is malloc'd, caller must free. */
200
char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;
  ssize_t len;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);

  /* readlink does not append a NUL terminator.  Reading at most
     MAXPATHLEN - 1 bytes and terminating explicitly guarantees the
     returned string is always NUL-terminated, even when the link
     target is truncated (previously a MAXPATHLEN-byte target left
     NAME2 unterminated).  */
  len = readlink (name1, name2, MAXPATHLEN - 1);
  if (len > 0)
    {
      name2[len] = '\0';
      free (name1);
      return name2;
    }
  else
    {
      /* readlink failed; fall back to the /proc path itself.  */
      free (name2);
      return name1;
    }
}
222
223/* Return non-zero if HEADER is a 64-bit ELF file. */
224
static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  /* The first SELFMAG bytes of e_ident must match the ELF magic
     "\177ELF" (equivalent to checking EI_MAG0..EI_MAG3 one by one),
     and the class byte must select the 64-bit format.  */
  return (memcmp (header->e_ident, ELFMAG, SELFMAG) == 0
	  && header->e_ident[EI_CLASS] == ELFCLASS64);
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd, result;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  /* Anything shorter than an ELF header cannot be a 64-bit ELF.  */
  if (read (fd, &header, sizeof (header)) == sizeof (header))
    result = elf_64_header_p (&header);
  else
    result = 0;

  close (fd);
  return result;
}
258
bd99dc85
PA
/* Remove LWP from all bookkeeping and release its storage.  The
   associated thread_info is removed first, then the LWP is unlinked
   from the global LWP list before being freed.  */
static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  /* free (NULL) is harmless if the target never allocated
     arch-private data for this thread.  */
  free (lwp->arch_private);
  free (lwp);
}
267
95954743
PA
268/* Add a process to the common process list, and set its private
269 data. */
270
271static struct process_info *
272linux_add_process (int pid, int attached)
273{
274 struct process_info *proc;
275
276 /* Is this the first process? If so, then set the arch. */
277 if (all_processes.head == NULL)
278 new_inferior = 1;
279
280 proc = add_process (pid, attached);
281 proc->private = xcalloc (1, sizeof (*proc->private));
282
aa5ca48f
DE
283 if (the_low_target.new_process != NULL)
284 proc->private->arch_private = the_low_target.new_process ();
285
95954743
PA
286 return proc;
287}
288
5091eb23
DE
289/* Remove a process from the common process list,
290 also freeing all private data. */
291
292static void
ca5c370d 293linux_remove_process (struct process_info *process)
5091eb23 294{
cdbfd419
PP
295 struct process_info_private *priv = process->private;
296
cdbfd419
PP
297 free (priv->arch_private);
298 free (priv);
5091eb23
DE
299 remove_process (process);
300}
301
07d4f67e
DE
302/* Wrapper function for waitpid which handles EINTR, and emulates
303 __WALL for systems where that is not available. */
304
static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      /* Emulate __WALL by polling alternately with and without
	 __WCLONE (see the flags ^= __WCLONE below), using WNOHANG so
	 neither poll blocks, and sleeping in sigsuspend between
	 rounds until a signal (e.g. SIGCHLD) arrives.  */
      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
	 LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
	{
	  /* Since all signals are blocked, there's no need to check
	     for EINTR here.  */
	  ret = waitpid (pid, status, flags);
	  out_errno = errno;

	  /* A real error (other than "no such child of this flavor")
	     or an actual event ends the loop.  */
	  if (ret == -1 && out_errno != ECHILD)
	    break;
	  else if (ret > 0)
	    break;

	  if (flags & __WCLONE)
	    {
	      /* We've tried both flavors now.  If WNOHANG is set,
		 there's nothing else to do, just bail out.  */
	      if (wnohang)
		break;

	      if (debug_threads)
		fprintf (stderr, "blocking\n");

	      /* Block waiting for signals.  */
	      sigsuspend (&wake_mask);
	    }

	  /* Alternate between the clone and non-clone wait flavors.  */
	  flags ^= __WCLONE;
	}

      /* Restore the caller's signal mask.  */
      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      /* Plain waitpid; just retry on EINTR.  */
      do
	ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
	     pid, flags, status ? *status : -1, ret);

  /* Re-expose the errno of the decisive waitpid call; the fprintf
     above may have clobbered it.  */
  errno = out_errno;
  return ret;
}
376
bd99dc85
PA
377/* Handle a GNU/Linux extended wait response. If we see a clone
378 event, we need to add the new LWP to our list (and not report the
379 trap to higher layers). */
0d62e5e8 380
static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  /* Extended ptrace event codes are reported in bits 16 and up of
     the wait status.  */
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      /* The event message of a clone event is the new thread's LWP id.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      /* Propagate clone tracing to the new thread.  */
      ptrace (PTRACE_SETOPTIONS, new_pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  /* The SIGSTOP is still on its way; remember to swallow it.  */
	  new_lwp->stop_expected = 1;

	  if (stopping_threads)
	    {
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
455
d50171e4
PA
456/* Return the PC as read from the regcache of LWP, without any
457 adjustment. */
458
459static CORE_ADDR
460get_pc (struct lwp_info *lwp)
461{
462 struct thread_info *saved_inferior;
463 struct regcache *regcache;
464 CORE_ADDR pc;
465
466 if (the_low_target.get_pc == NULL)
467 return 0;
468
469 saved_inferior = current_inferior;
470 current_inferior = get_lwp_thread (lwp);
471
472 regcache = get_thread_regcache (current_inferior, 1);
473 pc = (*the_low_target.get_pc) (regcache);
474
475 if (debug_threads)
476 fprintf (stderr, "pc is 0x%lx\n", (long) pc);
477
478 current_inferior = saved_inferior;
479 return pc;
480}
481
482/* This function should only be called if LWP got a SIGTRAP.
0d62e5e8
DJ
483 The SIGTRAP could mean several things.
484
485 On i386, where decr_pc_after_break is non-zero:
486 If we were single-stepping this process using PTRACE_SINGLESTEP,
487 we will get only the one SIGTRAP (even if the instruction we
488 stepped over was a breakpoint). The value of $eip will be the
489 next instruction.
490 If we continue the process using PTRACE_CONT, we will get a
491 SIGTRAP when we hit a breakpoint. The value of $eip will be
492 the instruction after the breakpoint (i.e. needs to be
493 decremented). If we report the SIGTRAP to GDB, we must also
494 report the undecremented PC. If we cancel the SIGTRAP, we
495 must resume at the decremented PC.
496
497 (Presumably, not yet tested) On a non-decr_pc_after_break machine
498 with hardware or kernel single-step:
499 If we single-step over a breakpoint instruction, our PC will
500 point at the following instruction. If we continue and hit a
501 breakpoint instruction, our PC will point at the breakpoint
502 instruction. */
503
static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  /* Rewind the PC past a software breakpoint's trap instruction only
     when this really was a breakpoint hit: a SIGTRAP that was not a
     single-step, not a watchpoint trigger, and not an extended
     ptrace event (event code in the high bits of last_status).  */
  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
ce3a066d 525
0d62e5e8 526static void *
95954743 527add_lwp (ptid_t ptid)
611cb4a5 528{
54a0b537 529 struct lwp_info *lwp;
0d62e5e8 530
54a0b537
PA
531 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
532 memset (lwp, 0, sizeof (*lwp));
0d62e5e8 533
95954743 534 lwp->head.id = ptid;
0d62e5e8 535
d50171e4
PA
536 lwp->last_resume_kind = resume_continue;
537
aa5ca48f
DE
538 if (the_low_target.new_thread != NULL)
539 lwp->arch_private = the_low_target.new_thread ();
540
54a0b537 541 add_inferior_to_list (&all_lwps, &lwp->head);
0d62e5e8 542
54a0b537 543 return lwp;
0d62e5e8 544}
611cb4a5 545
da6d8c04
DJ
546/* Start an inferior process and returns its pid.
547 ALLARGS is a vector of program-name and args. */
548
ce3a066d
DJ
static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  /* No-MMU systems cannot fork; vfork shares the address space until
     the exec below.  */
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      /* Child: ask to be traced, then exec the target program.  */
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      /* Put the inferior in its own process group.  */
      setpgid (0, 0);

      /* Try the literal path first; fall back to a PATH search only
	 when the file itself does not exist.  */
      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      /* exec failed; report and exit with a distinctive status.  */
      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  /* Parent: register the new inferior.  */
  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  /* Defer PTRACE_SETOPTIONS until the first stop is observed.  */
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
593
594/* Attach to an inferior process. */
595
95954743
PA
/* Attach to LWPID.  If INITIAL is non-zero this is the first LWP of
   a new inferior, and attach failure is a hard error; otherwise a
   failure only produces a warning.  */
static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
	{
	  /* If we fail to attach to an LWP, just warn.  */
	  fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
		   strerror (errno), errno);
	  fflush (stderr);
	  return;
	}
      else
	/* If we fail to attach to a process, report an error.  */
	error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
	       strerror (errno), errno);
    }

  if (initial)
    /* NOTE/FIXME: This lwp might have not been the tgid.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
	 safe, since we're always called in the context of the same
	 process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_lwp sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.
	??? If the process already has several threads we leave the other
	threads running.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}
672
95954743
PA
/* Attach to an additional LWP of an inferior we are already
   debugging.  Failures are non-fatal (see linux_attach_lwp_1).  */
void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}
678
/* Attach to process PID.  Returns 0 on success; attach failure
   raises an error inside linux_attach_lwp_1.  */
int
linux_attach (unsigned long pid)
{
  struct lwp_info *lwp;

  /* INITIAL == 1: failing to attach here is a hard error.  */
  linux_attach_lwp_1 (pid, 1);

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      lwp = (struct lwp_info *) find_inferior_id (&all_lwps,
						  ptid_build (pid, pid, 0));
      lwp->last_resume_kind = resume_stop;
    }

  return 0;
}
699
/* Helper state for second_thread_of_pid_p: counts the threads seen
   so far that belong to process PID.  */
struct counter
{
  int pid;
  int count;
};
705
706static int
707second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
708{
709 struct counter *counter = args;
710
711 if (ptid_get_pid (entry->id) == counter->pid)
712 {
713 if (++counter->count > 1)
714 return 1;
715 }
d61ddec4 716
da6d8c04
DJ
717 return 0;
718}
719
95954743
PA
720static int
721last_thread_of_process_p (struct thread_info *thread)
722{
723 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
724 int pid = ptid_get_pid (ptid);
725 struct counter counter = { pid , 0 };
da6d8c04 726
95954743
PA
727 return (find_inferior (&all_threads,
728 second_thread_of_pid_p, &counter) == NULL);
729}
730
731/* Kill the inferior lwp. */
732
static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  /* Only act on threads of the process being killed.  */
  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
	fprintf (stderr, "lkop: is last of process %s\n",
		 target_pid_to_str (entry->id));
      return 0;
    }

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  if (!lwp->stopped)
    send_sigstop (&lwp->head);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}
772
95954743
PA
/* Kill process PID and all of its LWPs.  Returns 0 on success, -1 if
   the process is unknown.  */
static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  struct thread_info *thread;  /* NOTE(review): assigned below but never
				  used afterwards.  */
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Kill every LWP except the leader first (see the kernel zombie
     note in linux_kill_one_lwp).  */
  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));
  thread = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
	     lwpid_of (lwp), pid);

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  if (!lwp->stopped)
    send_sigstop (&lwp->head);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (lwpid > 0 && WIFSTOPPED (wstat));

#ifdef USE_THREAD_DB
  thread_db_free (process, 0);
#endif
  delete_lwp (lwp);
  linux_remove_process (process);
  return 0;
}
817
95954743
PA
/* Detach from one LWP of process *ARGS (an int pid).  Callback for
   find_inferior over all_threads.  */
static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If we're detaching from a running inferior, make sure it is
     stopped first, as PTRACE_DETACH will not work otherwise.  */
  if (!lwp->stopped)
    {
      int lwpid = lwpid_of (lwp);

      stopping_threads = 1;
      send_sigstop (&lwp->head);

      /* If this detects a new thread through a clone event, the new
	 thread is appended to the end of the lwp list, so we'll
	 eventually detach from it.  */
      wait_for_sigstop (&lwp->head);
      stopping_threads = 0;

      /* If LWP exits while we're trying to stop it, there's nothing
	 left to do.  */
      lwp = find_lwp_pid (pid_to_ptid (lwpid));
      if (lwp == NULL)
	return 0;
    }

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      if (lwp->stopped)
	linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}
873
dd6953e1 874static int
95954743 875any_thread_of (struct inferior_list_entry *entry, void *args)
6ad8ae5c 876{
95954743
PA
877 int *pid_p = args;
878
879 if (ptid_get_pid (entry->id) == *pid_p)
880 return 1;
881
882 return 0;
883}
884
/* Detach from process PID, leaving it running on its own.  Returns 0
   on success, -1 if the process is unknown.  */
static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

#ifdef USE_THREAD_DB
  thread_db_free (process, 1);
#endif

  /* Select some thread of the process as the current inferior so the
     per-thread detach work below runs in the right context.  */
  current_inferior =
    (struct thread_info *) find_inferior (&all_threads, any_thread_of, &pid);

  /* Remove our breakpoints from the inferior before letting it go.  */
  delete_all_breakpoints ();
  find_inferior (&all_threads, linux_detach_one_lwp, &pid);
  linux_remove_process (process);
  return 0;
}
906
444d6139 907static void
95954743 908linux_join (int pid)
444d6139 909{
444d6139 910 int status, ret;
95954743 911 struct process_info *process;
bd99dc85 912
95954743
PA
913 process = find_process_pid (pid);
914 if (process == NULL)
915 return;
444d6139
PA
916
917 do {
95954743 918 ret = my_waitpid (pid, &status, 0);
444d6139
PA
919 if (WIFEXITED (status) || WIFSIGNALED (status))
920 break;
921 } while (ret != -1 || errno != ECHILD);
922}
923
6ad8ae5c 924/* Return nonzero if the given thread is still alive. */
0d62e5e8 925static int
95954743 926linux_thread_alive (ptid_t ptid)
0d62e5e8 927{
95954743
PA
928 struct lwp_info *lwp = find_lwp_pid (ptid);
929
930 /* We assume we always know if a thread exits. If a whole process
931 exited but we still haven't been able to report it to GDB, we'll
932 hold on to the last lwp of the dead process. */
933 if (lwp != NULL)
934 return !lwp->dead;
0d62e5e8
DJ
935 else
936 return 0;
937}
938
6bf5e0ba 939/* Return 1 if this lwp has an interesting status pending. */
611cb4a5 940static int
d50171e4 941status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
0d62e5e8 942{
54a0b537 943 struct lwp_info *lwp = (struct lwp_info *) entry;
95954743 944 ptid_t ptid = * (ptid_t *) arg;
d50171e4 945 struct thread_info *thread = get_lwp_thread (lwp);
95954743
PA
946
947 /* Check if we're only interested in events from a specific process
948 or its lwps. */
949 if (!ptid_equal (minus_one_ptid, ptid)
950 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
951 return 0;
0d62e5e8 952
d50171e4
PA
953 thread = get_lwp_thread (lwp);
954
955 /* If we got a `vCont;t', but we haven't reported a stop yet, do
956 report any status pending the LWP may have. */
957 if (lwp->last_resume_kind == resume_stop
958 && thread->last_status.kind == TARGET_WAITKIND_STOPPED)
959 return 0;
0d62e5e8 960
d50171e4 961 return lwp->status_pending_p;
0d62e5e8
DJ
962}
963
95954743
PA
964static int
965same_lwp (struct inferior_list_entry *entry, void *data)
966{
967 ptid_t ptid = *(ptid_t *) data;
968 int lwp;
969
970 if (ptid_get_lwp (ptid) != 0)
971 lwp = ptid_get_lwp (ptid);
972 else
973 lwp = ptid_get_pid (ptid);
974
975 if (ptid_get_lwp (entry->id) == lwp)
976 return 1;
977
978 return 0;
979}
980
981struct lwp_info *
982find_lwp_pid (ptid_t ptid)
983{
984 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
985}
986
/* Low-level wait.  Block in waitpid (honoring OPTIONS, to which
   __WALL is always added) for an event from the LWP in PTID, or from
   any child if PTID is minus_one_ptid.  The raw wait status is stored
   through WSTATP.  On success the matching lwp_info is returned with
   its `stopped', `last_status' and (for stop events) `stop_pc' fields
   updated.  Returns NULL only when OPTIONS contains WNOHANG and no
   event was pending.  Events from unknown children (detached ones
   exiting, or fork children announced by the kernel) are consumed
   here and never returned.  */

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  /* __WALL: wait for both clone and non-clone children.  */
  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  /* Stops with signals 32/33 (presumably the LinuxThreads rt signals
     -- see the __SIGRTMIN handling in linux_wait_for_event_1) are too
     noisy to log.  */
  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
	  || (WSTOPSIG (*wstatp) != 32
	      && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
	{
	  child->stopped_by_watchpoint = 0;
	}
      else
	{
	  struct thread_info *saved_inferior;

	  /* The low target callbacks read from current_inferior, so
	     temporarily switch to CHILD's thread.  */
	  saved_inferior = current_inferior;
	  current_inferior = get_lwp_thread (child);

	  child->stopped_by_watchpoint
	    = the_low_target.stopped_by_watchpoint ();

	  if (child->stopped_by_watchpoint)
	    {
	      if (the_low_target.stopped_data_address != NULL)
		child->stopped_data_address
		  = the_low_target.stopped_data_address ();
	      else
		child->stopped_data_address = 0;
	    }

	  current_inferior = saved_inferior;
	}
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}
611cb4a5 1118
d50171e4
PA
1119/* Arrange for a breakpoint to be hit again later. We don't keep the
1120 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1121 will handle the current event, eventually we will resume this LWP,
1122 and this breakpoint will trap again. */
1123
1124static int
1125cancel_breakpoint (struct lwp_info *lwp)
1126{
1127 struct thread_info *saved_inferior;
1128 struct regcache *regcache;
1129
1130 /* There's nothing to do if we don't support breakpoints. */
1131 if (!supports_breakpoints ())
1132 return 0;
1133
d50171e4
PA
1134 regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
1135
1136 /* breakpoint_at reads from current inferior. */
1137 saved_inferior = current_inferior;
1138 current_inferior = get_lwp_thread (lwp);
1139
1140 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1141 {
1142 if (debug_threads)
1143 fprintf (stderr,
1144 "CB: Push back breakpoint for %s\n",
1145 target_pid_to_str (lwp->head.id));
1146
1147 /* Back up the PC if necessary. */
1148 if (the_low_target.decr_pc_after_break)
1149 {
1150 struct regcache *regcache
1151 = get_thread_regcache (get_lwp_thread (lwp), 1);
1152 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1153 }
1154
1155 current_inferior = saved_inferior;
1156 return 1;
1157 }
1158 else
1159 {
1160 if (debug_threads)
1161 fprintf (stderr,
1162 "CB: No breakpoint found at %s for [%s]\n",
1163 paddress (lwp->stop_pc),
1164 target_pid_to_str (lwp->head.id));
1165 }
1166
1167 current_inferior = saved_inferior;
1168 return 0;
1169}
1170
/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  It is null_ptid when no step-over is in progress
   (linux_wait_1 both tests for and asserts that convention).  */
ptid_t step_over_bkpt;
1174
bd99dc85
PA
/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.

   Pending statuses recorded on LWPs are consumed before waitpid is
   entered.  Uninteresting events (thread exits that aren't the last
   thread, extended ptrace events, signals GDB asked us to pass
   through, expected SIGSTOPs) are handled here and the LWP is
   resumed, so only reportable events escape the loop.  On return,
   current_inferior points at the event thread.  */

static int
linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  /* A minus_one or bare-pid PTID means any LWP (of that process) may
     supply the event; otherwise only the named LWP qualifies.  */
  if (ptid_equal (ptid, minus_one_ptid)
      || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
    {
      event_child = (struct lwp_info *)
	find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
	fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      requested_child = find_lwp_pid (ptid);

      if (requested_child->status_pending_p)
	event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
	fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
		 lwpid_of (event_child), event_child->status_pending);
      /* Consume the cached status.  */
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
	{
	  if (debug_threads)
	    fprintf (stderr, "WNOHANG set, no event found\n");
	  return 0;
	}

      if (event_child == NULL)
	error ("event from unknown child");

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
	{
	  if (debug_threads)
	    fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

	  /* If the last thread is exiting, just return.  */
	  if (last_thread_of_process_p (current_inferior))
	    {
	      if (debug_threads)
		fprintf (stderr, "LWP %ld is last lwp of process\n",
			 lwpid_of (event_child));
	      return lwpid_of (event_child);
	    }

	  /* The event thread is gone; repoint current_inferior at
	     something valid (or NULL in non-stop mode).  */
	  if (!non_stop)
	    {
	      current_inferior = (struct thread_info *) all_threads.head;
	      if (debug_threads)
		fprintf (stderr, "Current inferior is now %ld\n",
			 lwpid_of (get_thread_lwp (current_inferior)));
	    }
	  else
	    {
	      current_inferior = NULL;
	      if (debug_threads)
		fprintf (stderr, "Current inferior is now <NULL>\n");
	    }

	  /* If we were waiting for this particular child to do something...
	     well, it did something.  */
	  if (requested_child != NULL)
	    {
	      int lwpid = lwpid_of (event_child);

	      /* Cancel the step-over operation --- the thread that
		 started it is gone.  */
	      if (finish_step_over (event_child))
		unstop_all_lwps (event_child);
	      delete_lwp (event_child);
	      return lwpid;
	    }

	  delete_lwp (event_child);

	  /* Wait for a more interesting event.  */
	  continue;
	}

      /* Deferred from attach time: ask to be told about clones.  */
      if (event_child->must_set_ptrace_flags)
	{
	  ptrace (PTRACE_SETOPTIONS, lwpid_of (event_child),
		  0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
	  event_child->must_set_ptrace_flags = 0;
	}

      /* A nonzero high word of the status with SIGTRAP signals an
	 extended ptrace event (e.g. a new clone).  */
      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
	  && *wstat >> 16 != 0)
	{
	  handle_extended_wait (event_child, *wstat);
	  continue;
	}

      /* If GDB is not interested in this signal, don't stop other
	 threads, and don't report it to GDB.  Just resume the
	 inferior right away.  We do this for threading-related
	 signals as well as any that GDB specifically requested we
	 ignore.  But never ignore SIGSTOP if we sent it ourselves,
	 and do not ignore signals when stepping - they may require
	 special handling to skip the signal handler.  */
      /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
	 thread library?  */
      if (WIFSTOPPED (*wstat)
	  && !event_child->stepping
	  && (
#if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
	      (current_process ()->private->thread_db != NULL
	       && (WSTOPSIG (*wstat) == __SIGRTMIN
		   || WSTOPSIG (*wstat) == __SIGRTMIN + 1))
	      ||
#endif
	      (pass_signals[target_signal_from_host (WSTOPSIG (*wstat))]
	       && !(WSTOPSIG (*wstat) == SIGSTOP
		    && event_child->stop_expected))))
	{
	  siginfo_t info, *info_p;

	  if (debug_threads)
	    fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
		     WSTOPSIG (*wstat), lwpid_of (event_child));

	  /* Re-deliver the signal with its original siginfo, if we
	     can still fetch it.  */
	  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
	    info_p = &info;
	  else
	    info_p = NULL;
	  linux_resume_one_lwp (event_child, event_child->stepping,
				WSTOPSIG (*wstat), info_p);
	  continue;
	}

      /* A SIGSTOP we sent ourselves to stop this LWP.  Swallow it,
	 and only actually stay stopped if someone wants us stopped.  */
      if (WIFSTOPPED (*wstat)
	  && WSTOPSIG (*wstat) == SIGSTOP
	  && event_child->stop_expected)
	{
	  int should_stop;

	  if (debug_threads)
	    fprintf (stderr, "Expected stop.\n");
	  event_child->stop_expected = 0;

	  should_stop = (event_child->last_resume_kind == resume_stop
			 || stopping_threads);

	  if (!should_stop)
	    {
	      linux_resume_one_lwp (event_child,
				    event_child->stepping, 0, NULL);
	      continue;
	    }
	}

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}
1366
95954743
PA
1367static int
1368linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1369{
1370 ptid_t wait_ptid;
1371
1372 if (ptid_is_pid (ptid))
1373 {
1374 /* A request to wait for a specific tgid. This is not possible
1375 with waitpid, so instead, we wait for any child, and leave
1376 children we're not interested in right now with a pending
1377 status to report later. */
1378 wait_ptid = minus_one_ptid;
1379 }
1380 else
1381 wait_ptid = ptid;
1382
1383 while (1)
1384 {
1385 int event_pid;
1386
1387 event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);
1388
1389 if (event_pid > 0
1390 && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
1391 {
1392 struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));
1393
1394 if (! WIFSTOPPED (*wstat))
1395 mark_lwp_dead (event_child, *wstat);
1396 else
1397 {
1398 event_child->status_pending_p = 1;
1399 event_child->status_pending = *wstat;
1400 }
1401 }
1402 else
1403 return event_pid;
1404 }
1405}
1406
6bf5e0ba
PA
1407
1408/* Count the LWP's that have had events. */
1409
1410static int
1411count_events_callback (struct inferior_list_entry *entry, void *data)
1412{
1413 struct lwp_info *lp = (struct lwp_info *) entry;
1414 int *count = data;
1415
1416 gdb_assert (count != NULL);
1417
1418 /* Count only resumed LWPs that have a SIGTRAP event pending that
1419 should be reported to GDB. */
1420 if (get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
1421 && lp->last_resume_kind != resume_stop
1422 && lp->status_pending_p
1423 && WIFSTOPPED (lp->status_pending)
1424 && WSTOPSIG (lp->status_pending) == SIGTRAP
1425 && !breakpoint_inserted_here (lp->stop_pc))
1426 (*count)++;
1427
1428 return 0;
1429}
1430
1431/* Select the LWP (if any) that is currently being single-stepped. */
1432
1433static int
1434select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1435{
1436 struct lwp_info *lp = (struct lwp_info *) entry;
1437
1438 if (get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
1439 && lp->last_resume_kind == resume_step
1440 && lp->status_pending_p)
1441 return 1;
1442 else
1443 return 0;
1444}
1445
1446/* Select the Nth LWP that has had a SIGTRAP event that should be
1447 reported to GDB. */
1448
1449static int
1450select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1451{
1452 struct lwp_info *lp = (struct lwp_info *) entry;
1453 int *selector = data;
1454
1455 gdb_assert (selector != NULL);
1456
1457 /* Select only resumed LWPs that have a SIGTRAP event pending. */
1458 if (lp->last_resume_kind != resume_stop
1459 && get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
1460 && lp->status_pending_p
1461 && WIFSTOPPED (lp->status_pending)
1462 && WSTOPSIG (lp->status_pending) == SIGTRAP
1463 && !breakpoint_inserted_here (lp->stop_pc))
1464 if ((*selector)-- == 0)
1465 return 1;
1466
1467 return 0;
1468}
1469
1470static int
1471cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
1472{
1473 struct lwp_info *lp = (struct lwp_info *) entry;
1474 struct lwp_info *event_lp = data;
1475
1476 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1477 if (lp == event_lp)
1478 return 0;
1479
1480 /* If a LWP other than the LWP that we're reporting an event for has
1481 hit a GDB breakpoint (as opposed to some random trap signal),
1482 then just arrange for it to hit it again later. We don't keep
1483 the SIGTRAP status and don't forward the SIGTRAP signal to the
1484 LWP. We will handle the current event, eventually we will resume
1485 all LWPs, and this one will get its breakpoint trap again.
1486
1487 If we do not do this, then we run the risk that the user will
1488 delete or disable the breakpoint, but the LWP will have already
1489 tripped on it. */
1490
1491 if (lp->last_resume_kind != resume_stop
1492 && get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
1493 && lp->status_pending_p
1494 && WIFSTOPPED (lp->status_pending)
1495 && WSTOPSIG (lp->status_pending) == SIGTRAP
bdabb078
PA
1496 && !lp->stepping
1497 && !lp->stopped_by_watchpoint
6bf5e0ba
PA
1498 && cancel_breakpoint (lp))
1499 /* Throw away the SIGTRAP. */
1500 lp->status_pending_p = 0;
1501
1502 return 0;
1503}
1504
1505/* Select one LWP out of those that have events pending. */
1506
1507static void
1508select_event_lwp (struct lwp_info **orig_lp)
1509{
1510 int num_events = 0;
1511 int random_selector;
1512 struct lwp_info *event_lp;
1513
1514 /* Give preference to any LWP that is being single-stepped. */
1515 event_lp
1516 = (struct lwp_info *) find_inferior (&all_lwps,
1517 select_singlestep_lwp_callback, NULL);
1518 if (event_lp != NULL)
1519 {
1520 if (debug_threads)
1521 fprintf (stderr,
1522 "SEL: Select single-step %s\n",
1523 target_pid_to_str (ptid_of (event_lp)));
1524 }
1525 else
1526 {
1527 /* No single-stepping LWP. Select one at random, out of those
1528 which have had SIGTRAP events. */
1529
1530 /* First see how many SIGTRAP events we have. */
1531 find_inferior (&all_lwps, count_events_callback, &num_events);
1532
1533 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1534 random_selector = (int)
1535 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1536
1537 if (debug_threads && num_events > 1)
1538 fprintf (stderr,
1539 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1540 num_events, random_selector);
1541
1542 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
1543 select_event_lwp_callback,
1544 &random_selector);
1545 }
1546
1547 if (event_lp != NULL)
1548 {
1549 /* Switch the event LWP. */
1550 *orig_lp = event_lp;
1551 }
1552}
1553
d50171e4
PA
1554/* Set this inferior LWP's state as "want-stopped". We won't resume
1555 this LWP until the client gives us another action for it. */
1556
1557static void
1558gdb_wants_lwp_stopped (struct inferior_list_entry *entry)
1559{
1560 struct lwp_info *lwp = (struct lwp_info *) entry;
1561 struct thread_info *thread = get_lwp_thread (lwp);
1562
1563 /* Most threads are stopped implicitly (all-stop); tag that with
1564 signal 0. The thread being explicitly reported stopped to the
1565 client, gets it's status fixed up afterwards. */
1566 thread->last_status.kind = TARGET_WAITKIND_STOPPED;
1567 thread->last_status.value.sig = TARGET_SIGNAL_0;
1568
1569 lwp->last_resume_kind = resume_stop;
1570}
1571
/* Set all LWP's states as "want-stopped" (see gdb_wants_lwp_stopped),
   walking every LWP in ALL_LWPS.  */

static void
gdb_wants_all_stopped (void)
{
  for_each_inferior (&all_lwps, gdb_wants_lwp_stopped);
}
1579
/* Wait for process, returns status.  Core of the target `wait'
   method: waits for a reportable event (honoring a step-over in
   progress via STEP_OVER_BKPT), handles process exit, filters out
   internal-breakpoint traps that GDB shouldn't see, and in all-stop
   mode stops every LWP and picks a fair event LWP before reporting.
   Fills OURSTATUS and returns the ptid of the event thread, or
   null_ptid if TARGET_WNOHANG was given and nothing was pending.  */

static ptid_t
linux_wait_1 (ptid_t ptid,
	      struct target_waitstatus *ourstatus, int target_options)
{
  int w;
  struct thread_info *thread = NULL;
  struct lwp_info *event_child = NULL;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

retry:
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive.  If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads.  We could
     pick a thread at random or restart all; restarting all is less
     arbitrary.  */
  if (!non_stop
      && !ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      thread = (struct thread_info *) find_inferior_id (&all_threads,
							cont_thread);

      /* No stepping, no signal - unless one is pending already, of course.  */
      if (thread == NULL)
	{
	  struct thread_resume resume_info;
	  resume_info.thread = minus_one_ptid;
	  resume_info.kind = resume_continue;
	  resume_info.sig = 0;
	  linux_resume (&resume_info, 1);
	}
      else
	ptid = cont_thread;
    }

  /* While a step-over is in progress, only the stepping thread may
     report an event, and we must wait for it (no WNOHANG).  */
  if (ptid_equal (step_over_bkpt, null_ptid))
    pid = linux_wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
	fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
		 target_pid_to_str (step_over_bkpt));
      pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0) /* only if TARGET_WNOHANG */
    return null_ptid;

  event_child = get_thread_lwp (current_inferior);

  /* If we are waiting for a particular child, and it exited,
     linux_wait_for_event will return its exit status.  Similarly if
     the last child exited.  If this is not the last child, however,
     do not report it as exited until there is a 'thread exited' response
     available in the remote protocol.  Instead, just wait for another event.
     This should be safe, because if the thread crashed we will already
     have reported the termination signal to GDB; that should stop any
     in-progress stepping operations, etc.

     Report the exit status of the last thread to exit.  This matches
     LinuxThreads' behavior.  */

  if (last_thread_of_process_p (current_inferior))
    {
      if (WIFEXITED (w) || WIFSIGNALED (w))
	{
	  /* NOTE: shadows the outer `pid' deliberately scoped to this
	     cleanup block.  */
	  int pid = pid_of (event_child);
	  struct process_info *process = find_process_pid (pid);

#ifdef USE_THREAD_DB
	  thread_db_free (process, 0);
#endif
	  delete_lwp (event_child);
	  linux_remove_process (process);

	  current_inferior = NULL;

	  if (WIFEXITED (w))
	    {
	      ourstatus->kind = TARGET_WAITKIND_EXITED;
	      ourstatus->value.integer = WEXITSTATUS (w);

	      if (debug_threads)
		fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
	    }
	  else
	    {
	      ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	      ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));

	      if (debug_threads)
		fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));

	    }

	  return pid_to_ptid (pid);
	}
    }
  else
    {
      if (!WIFSTOPPED (w))
	goto retry;
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && (*the_low_target.breakpoint_at) (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any reinsert (software
	 single-step) breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      if (bp_explains_trap)
	{
	  /* If we stepped or ran into an internal breakpoint, we've
	     already handled it.  So next time we resume (from this
	     PC), we should step over it.  */
	  if (debug_threads)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");

	  if (breakpoint_here (event_child->stop_pc))
	    event_child->need_step_over = 1;
	}
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* Check If GDB would be interested in this event.  If GDB wanted
     this thread to single step, we always want to report the SIGTRAP,
     and let GDB handle it.  Watchpoints should always be reported.
     So should signals we can't explain.  A SIGTRAP we can't explain
     could be a GDB breakpoint --- we may or not support Z0
     breakpoints.  If we do, we're be able to handle GDB breakpoints
     on top of internal breakpoints, by handling the internal
     breakpoint and still reporting the event to GDB.  If we don't,
     we're out of luck, GDB won't see the breakpoint hit.  */
  report_to_gdb = (!maybe_internal_trap
		   || event_child->last_resume_kind == resume_step
		   || event_child->stopped_by_watchpoint
		   || (!step_over_finished && !bp_explains_trap)
		   || gdb_breakpoint_here (event_child->stop_pc));

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
	{
	  if (bp_explains_trap)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");
	  if (step_over_finished)
	    fprintf (stderr, "Step-over finished.\n");
	}

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (the_low_target.set_pc != NULL)
	{
	  struct regcache *regcache
	    = get_thread_regcache (get_lwp_thread (event_child), 1);
	  (*the_low_target.set_pc) (regcache, event_child->stop_pc);
	}

      /* We've finished stepping over a breakpoint.  We've stopped all
	 LWPs momentarily except the stepping one.  This is where we
	 resume them all again.  We're going to keep waiting, so use
	 proceed, which handles stepping over the next breakpoint.  */
      if (debug_threads)
	fprintf (stderr, "proceeding all threads.\n");
      proceed_all_lwps ();
      goto retry;
    }

  if (debug_threads)
    {
      if (event_child->last_resume_kind == resume_step)
	fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
      if (event_child->stopped_by_watchpoint)
	fprintf (stderr, "Stopped by watchpoint.\n");
      if (gdb_breakpoint_here (event_child->stop_pc))
	fprintf (stderr, "Stopped by GDB breakpoint.\n");
      if (debug_threads)
	fprintf (stderr, "Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  if (!non_stop)
    {
      /* In all-stop, stop all threads.  */
      stop_all_lwps ();

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid_equal (ptid, minus_one_ptid))
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}

      /* Now that we've selected our final event LWP, cancel any
	 breakpoints in other LWPs that have hit a GDB breakpoint.
	 See the comment in cancel_breakpoints_callback to find out
	 why.  */
      find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (event_child);
    }

  ourstatus->kind = TARGET_WAITKIND_STOPPED;

  /* Do this before the gdb_wants_all_stopped calls below, since they
     always set last_resume_kind to resume_stop.  */
  if (event_child->last_resume_kind == resume_stop && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = TARGET_SIGNAL_0;
    }
  else if (event_child->last_resume_kind == resume_stop && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons.  */
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }
  else
    {
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }

  gdb_assert (ptid_equal (step_over_bkpt, null_ptid));

  if (!non_stop)
    {
      /* From GDB's perspective, all-stop mode always stops all
	 threads implicitly.  Tag all threads as "want-stopped".  */
      gdb_wants_all_stopped ();
    }
  else
    {
      /* We're reporting this LWP as stopped.  Update it's
	 "want-stopped" state to what the client wants, until it gets
	 a new resume action.  */
      gdb_wants_lwp_stopped (&event_child->head);
    }

  if (debug_threads)
    fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
	     target_pid_to_str (ptid_of (event_child)),
	     ourstatus->kind,
	     ourstatus->value.sig);

  get_lwp_thread (event_child)->last_status = *ourstatus;
  return ptid_of (event_child);
}
1899
1900/* Get rid of any pending event in the pipe. */
1901static void
1902async_file_flush (void)
1903{
1904 int ret;
1905 char buf;
1906
1907 do
1908 ret = read (linux_event_pipe[0], &buf, 1);
1909 while (ret >= 0 || (ret == -1 && errno == EINTR));
1910}
1911
1912/* Put something in the pipe, so the event loop wakes up. */
1913static void
1914async_file_mark (void)
1915{
1916 int ret;
1917
1918 async_file_flush ();
1919
1920 do
1921 ret = write (linux_event_pipe[1], "+", 1);
1922 while (ret == 0 || (ret == -1 && errno == EINTR));
1923
1924 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1925 be awakened anyway. */
1926}
1927
95954743
PA
/* Target wait entry point: wrapper around linux_wait_1 that also
   manages the async event pipe.  PTID selects which LWP(s) to wait
   for; *OURSTATUS receives the reported event; TARGET_OPTIONS may
   include TARGET_WNOHANG.  Returns the ptid of the LWP the event
   refers to, or null_ptid when nothing was reported.  */
static ptid_t
linux_wait (ptid_t ptid,
	    struct target_waitstatus *ourstatus, int target_options)
{
  ptid_t event_ptid;

  if (debug_threads)
    fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));

  /* Flush the async file first.  */
  if (target_is_async_p ())
    async_file_flush ();

  event_ptid = linux_wait_1 (ptid, ourstatus, target_options);

  /* If at least one stop was reported, there may be more.  A single
     SIGCHLD can signal more than one child stop.  */
  if (target_is_async_p ()
      && (target_options & TARGET_WNOHANG) != 0
      && !ptid_equal (event_ptid, null_ptid))
    async_file_mark ();

  return event_ptid;
}
1952
/* Send signal SIGNO to the single LWP LWPID.  Prefer the tkill
   syscall, which addresses an individual thread (needed with NPTL);
   once the kernel reports ENOSYS for tkill, remember that and fall
   back to plain kill from then on.  Returns the syscall's result.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
#ifdef __NR_tkill
  static int tkill_unavailable;

  if (!tkill_unavailable)
    {
      int result;

      errno = 0;
      result = syscall (__NR_tkill, lwpid, signo);
      if (errno != ENOSYS)
	return result;

      /* Kernel has no tkill; don't bother trying again.  */
      tkill_unavailable = 1;
    }
#endif

  return kill (lwpid, signo);
}
1980
0d62e5e8
DJ
/* for_each_inferior callback: send SIGSTOP to the LWP in ENTRY,
   unless it is already stopped or already has a SIGSTOP in flight.
   Sets stop_expected first so the resulting stop is recognized as
   self-inflicted rather than reported to the client.  */
static void
send_sigstop (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid;

  if (lwp->stopped)
    return;

  pid = lwpid_of (lwp);

  /* If we already have a pending stop signal for this process, don't
     send another.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);

      return;
    }

  if (debug_threads)
    fprintf (stderr, "Sending sigstop to lwp %d\n", pid);

  lwp->stop_expected = 1;
  kill_lwp (pid, SIGSTOP);
}
2008
95954743
PA
2009static void
2010mark_lwp_dead (struct lwp_info *lwp, int wstat)
2011{
2012 /* It's dead, really. */
2013 lwp->dead = 1;
2014
2015 /* Store the exit status for later. */
2016 lwp->status_pending_p = 1;
2017 lwp->status_pending = wstat;
2018
95954743
PA
2019 /* Prevent trying to stop it. */
2020 lwp->stopped = 1;
2021
2022 /* No further stops are expected from a dead lwp. */
2023 lwp->stop_expected = 0;
2024}
2025
0d62e5e8
DJ
/* for_each_inferior callback: wait until the LWP in ENTRY reports a
   stop (or its process exits).  Run after send_sigstop while
   stopping all threads.  A stop with a signal other than SIGSTOP is
   saved as a pending status for later reporting; a process exit
   marks the LWP dead with the exit status pending.  Restores (or
   re-picks) current_inferior, since pulling events can delete
   threads.  */
static void
wait_for_sigstop (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *saved_inferior;
  int wstat;
  ptid_t saved_tid;
  ptid_t ptid;
  int pid;

  if (lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
		 lwpid_of (lwp));
      return;
    }

  saved_inferior = current_inferior;
  if (saved_inferior != NULL)
    saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  ptid = lwp->head.id;

  if (debug_threads)
    fprintf (stderr, "wait_for_sigstop: pulling one event\n");

  pid = linux_wait_for_event (ptid, &wstat, __WALL);

  /* If we stopped with a non-SIGSTOP signal, save it for later
     and record the pending SIGSTOP.  If the process exited, just
     return.  */
  if (WIFSTOPPED (wstat))
    {
      if (debug_threads)
	fprintf (stderr, "LWP %ld stopped with signal %d\n",
		 lwpid_of (lwp), WSTOPSIG (wstat));

      if (WSTOPSIG (wstat) != SIGSTOP)
	{
	  if (debug_threads)
	    fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
		     lwpid_of (lwp), wstat);

	  /* Remember this stop so it can be reported to the client;
	     the SIGSTOP we sent is still expected later.  */
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);

      lwp = find_lwp_pid (pid_to_ptid (pid));
      if (lwp)
	{
	  /* Leave this status pending for the next time we're able to
	     report it.  In the mean time, we'll report this lwp as
	     dead to GDB, so GDB doesn't try to read registers and
	     memory from it.  This can only happen if this was the
	     last thread of the process; otherwise, PID is removed
	     from the thread tables before linux_wait_for_event
	     returns.  */
	  mark_lwp_dead (lwp, wstat);
	}
    }

  if (saved_inferior == NULL || linux_thread_alive (saved_tid))
    current_inferior = saved_inferior;
  else
    {
      if (debug_threads)
	fprintf (stderr, "Previously current thread died.\n");

      if (non_stop)
	{
	  /* We can't change the current inferior behind GDB's back,
	     otherwise, a subsequent command may apply to the wrong
	     process.  */
	  current_inferior = NULL;
	}
      else
	{
	  /* Set a valid thread as current.  */
	  set_desired_inferior (0);
	}
    }
}
2116
/* Stop every LWP: first send each one a SIGSTOP, then wait until
   each has actually reported a stop.  STOPPING_THREADS is set for
   the duration so the event-handling code knows these stops are
   self-inflicted.  */
static void
stop_all_lwps (void)
{
  stopping_threads = 1;
  for_each_inferior (&all_lwps, send_sigstop);
  for_each_inferior (&all_lwps, wait_for_sigstop);
  stopping_threads = 0;
}
2125
da6d8c04
DJ
/* Resume execution of the inferior process.
   If STEP is nonzero, single-step it.
   If SIGNAL is nonzero, give it that signal, with INFO (may be NULL)
   as its siginfo.  Does nothing if the LWP is running, and refuses
   to resume if the LWP has a pending status to report, enqueueing
   SIGNAL instead.  */

static void
linux_resume_one_lwp (struct lwp_info *lwp,
		      int step, int signal, siginfo_t *info)
{
  struct thread_info *saved_inferior;

  if (lwp->stopped == 0)
    return;

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if we are waiting to reinsert a
     breakpoint; it will be picked up again below.  */
  if (signal != 0
      && (lwp->status_pending_p || lwp->pending_signals != NULL
	  || lwp->bp_reinsert != 0))
    {
      struct pending_signals *p_sig;
      p_sig = xmalloc (sizeof (*p_sig));
      p_sig->prev = lwp->pending_signals;
      p_sig->signal = signal;
      if (info == NULL)
	memset (&p_sig->info, 0, sizeof (siginfo_t));
      else
	memcpy (&p_sig->info, info, sizeof (siginfo_t));
      lwp->pending_signals = p_sig;
    }

  /* A pending status must be reported before the LWP may run again.  */
  if (lwp->status_pending_p)
    {
      if (debug_threads)
	fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
		 " has pending status\n",
		 lwpid_of (lwp), step ? "step" : "continue", signal,
		 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
	     lwpid_of (lwp), step ? "step" : "continue", signal,
	     lwp->stop_expected ? "expected" : "not expected");

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	fprintf (stderr, "  pending reinsert at 0x%s\n",
		 paddress (lwp->bp_reinsert));

      if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
	{
	  if (step == 0)
	    fprintf (stderr, "BAD - reinserting but not stepping.\n");

	  step = 1;
	}

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (debug_threads && the_low_target.get_pc != NULL)
    {
      struct regcache *regcache = get_thread_regcache (current_inferior, 1);
      CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "  resuming from pc 0x%lx\n", (long) pc);
    }

  /* If we have pending signals, consume one unless we are trying to reinsert
     a breakpoint.  The oldest queued signal (tail of the list) is
     delivered first.  */
  if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);

  /* Cached registers go stale the moment the LWP runs.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));
  errno = 0;
  lwp->stopped = 0;
  lwp->stopped_by_watchpoint = 0;
  lwp->stepping = step;
  ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_ARG4_TYPE) (uintptr_t) signal);

  current_inferior = saved_inferior;
  if (errno)
    {
      /* ESRCH from ptrace either means that the thread was already
	 running (an error) or that it is gone (a race condition).  If
	 it's gone, we will get a notification the next time we wait,
	 so we can ignore the error.  We could differentiate these
	 two, but it's tricky without waiting; the thread still exists
	 as a zombie, so sending it signal 0 would succeed.  So just
	 ignore ESRCH.  */
      if (errno == ESRCH)
	return;

      perror_with_name ("ptrace");
    }
}
2258
2bd7c093
PA
/* Bundle of the resume requests handed to linux_resume, in the form
   find_inferior callbacks can receive through their VOID* argument.  */
struct thread_resume_array
{
  struct thread_resume *resume;	/* Array of resume actions.  */
  size_t n;			/* Number of elements in RESUME.  */
};
64386c31
DJ
2264
2265/* This function is called once per thread. We look up the thread
5544ad89
DJ
2266 in RESUME_PTR, and mark the thread with a pointer to the appropriate
2267 resume request.
2268
2269 This algorithm is O(threads * resume elements), but resume elements
2270 is small (and will remain small at least until GDB supports thread
2271 suspension). */
2bd7c093
PA
2272static int
2273linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
0d62e5e8 2274{
54a0b537 2275 struct lwp_info *lwp;
64386c31 2276 struct thread_info *thread;
5544ad89 2277 int ndx;
2bd7c093 2278 struct thread_resume_array *r;
64386c31
DJ
2279
2280 thread = (struct thread_info *) entry;
54a0b537 2281 lwp = get_thread_lwp (thread);
2bd7c093 2282 r = arg;
64386c31 2283
2bd7c093 2284 for (ndx = 0; ndx < r->n; ndx++)
95954743
PA
2285 {
2286 ptid_t ptid = r->resume[ndx].thread;
2287 if (ptid_equal (ptid, minus_one_ptid)
2288 || ptid_equal (ptid, entry->id)
2289 || (ptid_is_pid (ptid)
2290 && (ptid_get_pid (ptid) == pid_of (lwp)))
2291 || (ptid_get_lwp (ptid) == -1
2292 && (ptid_get_pid (ptid) == pid_of (lwp))))
2293 {
d50171e4
PA
2294 if (r->resume[ndx].kind == resume_stop
2295 && lwp->last_resume_kind == resume_stop)
2296 {
2297 if (debug_threads)
2298 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
2299 thread->last_status.kind == TARGET_WAITKIND_STOPPED
2300 ? "stopped"
2301 : "stopping",
2302 lwpid_of (lwp));
2303
2304 continue;
2305 }
2306
95954743 2307 lwp->resume = &r->resume[ndx];
d50171e4 2308 lwp->last_resume_kind = lwp->resume->kind;
95954743
PA
2309 return 0;
2310 }
2311 }
2bd7c093
PA
2312
2313 /* No resume action for this thread. */
2314 lwp->resume = NULL;
64386c31 2315
2bd7c093 2316 return 0;
5544ad89
DJ
2317}
2318
5544ad89 2319
bd99dc85
PA
2320/* Set *FLAG_P if this lwp has an interesting status pending. */
2321static int
2322resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 2323{
bd99dc85 2324 struct lwp_info *lwp = (struct lwp_info *) entry;
5544ad89 2325
bd99dc85
PA
2326 /* LWPs which will not be resumed are not interesting, because
2327 we might not wait for them next time through linux_wait. */
2bd7c093 2328 if (lwp->resume == NULL)
bd99dc85 2329 return 0;
64386c31 2330
bd99dc85 2331 if (lwp->status_pending_p)
d50171e4
PA
2332 * (int *) flag_p = 1;
2333
2334 return 0;
2335}
2336
/* Return 1 if this lwp that GDB wants running is stopped at an
   internal breakpoint that we need to step over.  It assumes that any
   required STOP_PC adjustment has already been propagated to the
   inferior's regcache.  Used as a find_inferior callback, so a
   nonzero return stops the walk at this LWP.  */

static int
need_step_over_p (struct inferior_list_entry *entry, void *dummy)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *saved_inferior;
  CORE_ADDR pc;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, not stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  if (lwp->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  if (!lwp->need_step_over)
    {
      /* NOTE(review): this branch only logs and then falls through to
	 the checks below rather than returning 0 — presumably
	 NEED_STEP_OVER is only a hint and the breakpoint checks are
	 authoritative; confirm against upstream history.  */
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
		 lwpid_of (lwp));
      return 0;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Cancelling, PC was changed. "
		 "Old stop_pc was 0x%s, PC is now 0x%s\n",
		 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));

      lwp->need_step_over = 0;
      return 0;
    }

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  */
      if (gdb_breakpoint_here (pc))
	{
	  if (debug_threads)
	    fprintf (stderr,
		     "Need step over [LWP %ld]? yes, but found"
		     " GDB breakpoint at 0x%s; skipping step over\n",
		     lwpid_of (lwp), paddress (pc));

	  current_inferior = saved_inferior;
	  return 0;
	}
      else
	{
	  if (debug_threads)
	    fprintf (stderr,
		     "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n",
		     lwpid_of (lwp), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_inferior stops looking.  */
	  current_inferior = saved_inferior;

	  /* If the step over is cancelled, this is set again.  */
	  lwp->need_step_over = 0;
	  return 1;
	}
    }

  current_inferior = saved_inferior;

  if (debug_threads)
    fprintf (stderr,
	     "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
	     lwpid_of (lwp), paddress (pc));

  return 0;
}
2452
d50171e4
PA
/* Start a step-over operation on LWP.  When LWP stopped at a
   breakpoint, to make progress, we need to remove the breakpoint out
   of the way.  If we let other threads run while we do that, they may
   pass by the breakpoint location and miss hitting it.  To avoid
   that, a step-over momentarily stops all threads while LWP is
   single-stepped while the breakpoint is temporarily uninserted from
   the inferior.  When the single-step finishes, we reinsert the
   breakpoint, and let all threads that are supposed to be running,
   run again.

   On targets that don't support hardware single-step, we don't
   currently support full software single-stepping.  Instead, we only
   support stepping over the thread event breakpoint, by asking the
   low target where to place a reinsert breakpoint.  Since this
   routine assumes the breakpoint being stepped over is a thread event
   breakpoint, it usually assumes the return address of the current
   function is a good enough place to set the reinsert breakpoint.

   Returns 1 (a step-over was started); records the stepping LWP in
   STEP_OVER_BKPT so the wait code demands the next event from it.  */

static int
start_step_over (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    fprintf (stderr,
	     "Starting step-over on LWP %ld.  Stopping all threads\n",
	     lwpid_of (lwp));

  stop_all_lwps ();

  if (debug_threads)
    fprintf (stderr, "Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);

  if (can_hardware_single_step ())
    {
      step = 1;
    }
  else
    {
      /* No hardware single-step: plant a reinsert breakpoint at the
	 address the low target suggests and continue instead.  */
      CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
      set_reinsert_breakpoint (raddr);
      step = 0;
    }

  current_inferior = saved_inferior;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = lwp->head.id;
  return 1;
}
2519
2520/* Finish a step-over. Reinsert the breakpoint we had uninserted in
2521 start_step_over, if still there, and delete any reinsert
2522 breakpoints we've set, on non hardware single-step targets. */
2523
2524static int
2525finish_step_over (struct lwp_info *lwp)
2526{
2527 if (lwp->bp_reinsert != 0)
2528 {
2529 if (debug_threads)
2530 fprintf (stderr, "Finished step over.\n");
2531
2532 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
2533 may be no breakpoint to reinsert there by now. */
2534 reinsert_breakpoints_at (lwp->bp_reinsert);
2535
2536 lwp->bp_reinsert = 0;
2537
2538 /* Delete any software-single-step reinsert breakpoints. No
2539 longer needed. We don't have to worry about other threads
2540 hitting this trap, and later not being able to explain it,
2541 because we were stepping over a breakpoint, and we hold all
2542 threads but LWP stopped while doing that. */
2543 if (!can_hardware_single_step ())
2544 delete_reinsert_breakpoints ();
2545
2546 step_over_bkpt = null_ptid;
2547 return 1;
2548 }
2549 else
2550 return 0;
2551}
2552
5544ad89
DJ
/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.

   ARG points at the LEAVE_ALL_STOPPED flag computed by linux_resume.  */

static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp;
  struct thread_info *thread;
  int step;
  int leave_all_stopped = * (int *) arg;
  int leave_pending;

  thread = (struct thread_info *) entry;
  lwp = get_thread_lwp (thread);

  /* No resume action recorded for this thread; leave it alone.  */
  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (&lwp->head);
	}
      else
	{
	  if (debug_threads)
	    fprintf (stderr, "already stopped LWP %ld\n",
		     lwpid_of (lwp));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */
	  send_sigstop (&lwp->head);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      get_lwp_thread (lwp)->last_status.kind = TARGET_WAITKIND_IGNORE;
      return 0;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume any threads - we can just report the pending
     status.  Make sure to queue any signals that would otherwise be
     sent.  In all-stop mode, we do this decision based on if *any*
     thread has a pending status.  If there's a thread that needs the
     step-over-breakpoint dance, then don't resume any other thread
     but that particular one.  */
  leave_pending = (lwp->status_pending_p || leave_all_stopped);

  if (!leave_pending)
    {
      if (debug_threads)
	fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));

      step = (lwp->resume->kind == resume_step);
      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
      get_lwp_thread (lwp)->last_status.kind = TARGET_WAITKIND_IGNORE;
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
	{
	  struct pending_signals *p_sig;
	  p_sig = xmalloc (sizeof (*p_sig));
	  p_sig->prev = lwp->pending_signals;
	  p_sig->signal = lwp->resume->sig;
	  memset (&p_sig->info, 0, sizeof (siginfo_t));

	  /* If this is the same signal we were previously stopped by,
	     make sure to queue its siginfo.  We can ignore the return
	     value of ptrace; if it fails, we'll skip
	     PTRACE_SETSIGINFO.  */
	  if (WIFSTOPPED (lwp->last_status)
	      && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
	    ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

	  lwp->pending_signals = p_sig;
	}
    }

  lwp->resume = NULL;
  return 0;
}
2669
/* Target resume entry point: apply the N resume requests in
   RESUME_INFO to all threads.  May leave everything stopped when a
   thread has a pending status to report, or when a step-over is
   needed first (the step-over is then started here).  */
static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_resume_array array = { resume_info, n };
  struct lwp_info *need_step_over = NULL;
  int any_pending;
  int leave_all_stopped;

  /* Attach each thread's matching resume request to it.  */
  find_inferior (&all_threads, linux_set_resume_request, &array);

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  any_pending = 0;
  if (!non_stop)
    find_inferior (&all_lwps, resume_status_pending_p, &any_pending);

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over
      = (struct lwp_info *) find_inferior (&all_lwps,
					   need_step_over_p, NULL);

  leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	fprintf (stderr, "Not resuming all, need step over\n");
      else if (any_pending)
	fprintf (stderr,
		 "Not resuming, all-stop and found "
		 "an LWP with pending status\n");
      else
	fprintf (stderr, "Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);

  if (need_step_over)
    start_step_over (need_step_over);
}
2722
/* This function is called once per thread.  We check the thread's
   last resume request, which will tell us whether to resume, step, or
   leave the thread stopped.  Any signal the client requested to be
   delivered has already been enqueued at this point.

   If any thread that GDB wants running is stopped at an internal
   breakpoint that needs stepping over, we start a step-over operation
   on that particular thread, and leave all others stopped.  */

static void
proceed_one_lwp (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp;
  int step;

  lwp = (struct lwp_info *) entry;

  if (debug_threads)
    fprintf (stderr,
	     "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));

  /* Each guard below leaves the LWP as-is: already running, meant to
     stay stopped, has something to report, or explicitly suspended.  */
  if (!lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr, "   LWP %ld already running\n", lwpid_of (lwp));
      return;
    }

  if (lwp->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	fprintf (stderr, "   client wants LWP %ld stopped\n", lwpid_of (lwp));
      return;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	fprintf (stderr, "   LWP %ld has pending status, leaving stopped\n",
		 lwpid_of (lwp));
      return;
    }

  if (lwp->suspended)
    {
      if (debug_threads)
	fprintf (stderr, "   LWP %ld is suspended\n", lwpid_of (lwp));
      return;
    }

  step = lwp->last_resume_kind == resume_step;
  linux_resume_one_lwp (lwp, step, 0, NULL);
}
2776
/* When we finish a step-over, set threads running again.  If there's
   another thread that may need a step-over, now's the time to start
   it.  Eventually, we'll move all threads past their breakpoints.  */

static void
proceed_all_lwps (void)
{
  struct lwp_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (supports_breakpoints ())
    {
      need_step_over
	= (struct lwp_info *) find_inferior (&all_lwps,
					     need_step_over_p, NULL);

      if (need_step_over != NULL)
	{
	  if (debug_threads)
	    fprintf (stderr, "proceed_all_lwps: found "
		     "thread %ld needing a step-over\n",
		     lwpid_of (need_step_over));

	  /* start_step_over resumes only that one LWP.  */
	  start_step_over (need_step_over);
	  return;
	}
    }

  if (debug_threads)
    fprintf (stderr, "Proceeding, no step-over needed\n");

  for_each_inferior (&all_lwps, proceed_one_lwp);
}
2814
/* Stopped LWPs that the client wanted to be running, that don't have
   pending statuses, are set to run again, except for EXCEPT, if not
   NULL.  This undoes a stop_all_lwps call.  */

static void
unstop_all_lwps (struct lwp_info *except)
{
  if (debug_threads)
    {
      if (except)
	fprintf (stderr,
		 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
      else
	fprintf (stderr,
		 "unstopping all lwps\n");
    }

  /* Make sure proceed_one_lwp doesn't try to resume this thread.
     Bumping SUSPENDED trips proceed_one_lwp's suspended guard; it is
     restored right after the walk.  */
  if (except != NULL)
    ++except->suspended;

  for_each_inferior (&all_lwps, proceed_one_lwp);

  if (except != NULL)
    --except->suspended;
}
2841
2842#ifdef HAVE_LINUX_USRREGS
da6d8c04
DJ
2843
2844int
0a30fbc4 2845register_addr (int regnum)
da6d8c04
DJ
2846{
2847 int addr;
2848
2ec06d2e 2849 if (regnum < 0 || regnum >= the_low_target.num_regs)
da6d8c04
DJ
2850 error ("Invalid register number %d.", regnum);
2851
2ec06d2e 2852 addr = the_low_target.regmap[regnum];
da6d8c04
DJ
2853
2854 return addr;
2855}
2856
/* Fetch one register, REGNO, from the inferior via PTRACE_PEEKUSER
   and supply it to REGCACHE.  Silently skips registers the low
   target cannot fetch or that have no USER-area slot (regaddr -1).
   Reads word-by-word into a stack buffer rounded up to the ptrace
   transfer size.  */
static void
fetch_register (struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= the_low_target.num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (regno);
  if (regaddr == -1)
    return;

  pid = lwpid_of (get_thread_lwp (current_inferior));
  /* Round the register size up to a whole number of ptrace words.  */
  size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
	  & - sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  /* Let the low target massage the raw bytes if it wants to.  */
  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
2897
2898/* Fetch all registers, or just one, from the child process. */
58caa3dc 2899static void
442ea881 2900usr_fetch_inferior_registers (struct regcache *regcache, int regno)
da6d8c04 2901{
4463ce24 2902 if (regno == -1)
2ec06d2e 2903 for (regno = 0; regno < the_low_target.num_regs; regno++)
442ea881 2904 fetch_register (regcache, regno);
da6d8c04 2905 else
442ea881 2906 fetch_register (regcache, regno);
da6d8c04
DJ
2907}
2908
/* Store our register values back into the inferior.
   If REGNO is -1, do this for all registers.
   Otherwise, REGNO specifies which register (so we can save time).
   Writes go through PTRACE_POKEUSER one transfer word at a time.  */
static void
usr_store_inferior_registers (struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= 0)
    {
      if (regno >= the_low_target.num_regs)
	return;

      /* cannot_store_register returning 1 means "never storable";
	 other non-zero values are handled after the write below.  */
      if ((*the_low_target.cannot_store_register) (regno) == 1)
	return;

      regaddr = register_addr (regno);
      if (regaddr == -1)
	return;
      errno = 0;
      /* Round up to whole ptrace words, matching fetch_register.  */
      size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
	     & - sizeof (PTRACE_XFER_TYPE);
      buf = alloca (size);
      memset (buf, 0, size);

      /* Let the backend reformat the register if required.  */
      if (the_low_target.collect_ptrace_register)
	the_low_target.collect_ptrace_register (regcache, regno, buf);
      else
	collect_register (regcache, regno, buf);

      pid = lwpid_of (get_thread_lwp (current_inferior));
      for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
	{
	  errno = 0;
	  ptrace (PTRACE_POKEUSER, pid,
		  /* Coerce to a uintptr_t first to avoid potential gcc warning
		     about coercing an 8 byte integer to a 4 byte pointer.  */
		  (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
		  (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
	  if (errno != 0)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      if (errno == ESRCH)
		return;

	      /* Only complain for registers the backend says are
		 actually storable.  */
	      if ((*the_low_target.cannot_store_register) (regno) == 0)
		error ("writing register %d: %s", regno, strerror (errno));
	    }
	  regaddr += sizeof (PTRACE_XFER_TYPE);
	}
    }
  else
    /* REGNO == -1: recurse once per register.  */
    for (regno = 0; regno < the_low_target.num_regs; regno++)
      usr_store_inferior_registers (regcache, regno);
}
58caa3dc
DJ
2970#endif /* HAVE_LINUX_USRREGS */
2971
2972
2973
2974#ifdef HAVE_LINUX_REGSETS
2975
/* Fetch registers via the regset (PTRACE_GETREGS-style) interface.
   Walks the target_regsets table, reading each enabled regset from
   the kernel and storing it into REGCACHE.  Returns 0 if a
   general-registers regset was handled, 1 otherwise (so the caller
   knows whether to fall back to the USRREGS path).  */
static int
regsets_fetch_inferior_registers (struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;

  regset = target_regsets;

  pid = lwpid_of (get_thread_lwp (current_inferior));
  /* The table is terminated by an entry with size < 0.  */
  while (regset->size >= 0)
    {
      void *buf;
      int res;

      /* Skip empty regsets and ones previously disabled by EIO.  */
      if (regset->size == 0 || disabled_regsets[regset - target_regsets])
	{
	  regset ++;
	  continue;
	}

      buf = xmalloc (regset->size);
      /* SPARC's ptrace takes the buffer in the third argument.  */
#ifndef __sparc__
      res = ptrace (regset->get_request, pid, 0, buf);
#else
      res = ptrace (regset->get_request, pid, buf, 0);
#endif
      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process.  */
	      disabled_regsets[regset - target_regsets] = 1;
	      free (buf);
	      continue;
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      /* NOTE(review): on a non-EIO ptrace failure we still reach this
	 store_function call with an unread BUF — looks like it stores
	 uninitialized data into the regcache; verify intent.  */
      regset->store_function (regcache, buf);
      regset ++;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
3032
3033static int
442ea881 3034regsets_store_inferior_registers (struct regcache *regcache)
58caa3dc
DJ
3035{
3036 struct regset_info *regset;
e9d25b98 3037 int saw_general_regs = 0;
95954743 3038 int pid;
58caa3dc
DJ
3039
3040 regset = target_regsets;
3041
95954743 3042 pid = lwpid_of (get_thread_lwp (current_inferior));
58caa3dc
DJ
3043 while (regset->size >= 0)
3044 {
3045 void *buf;
3046 int res;
3047
52fa2412 3048 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
58caa3dc
DJ
3049 {
3050 regset ++;
3051 continue;
3052 }
3053
bca929d3 3054 buf = xmalloc (regset->size);
545587ee
DJ
3055
3056 /* First fill the buffer with the current register set contents,
3057 in case there are any items in the kernel's regset that are
3058 not in gdbserver's regcache. */
dfb64f85 3059#ifndef __sparc__
95954743 3060 res = ptrace (regset->get_request, pid, 0, buf);
dfb64f85 3061#else
95954743 3062 res = ptrace (regset->get_request, pid, buf, 0);
dfb64f85 3063#endif
545587ee
DJ
3064
3065 if (res == 0)
3066 {
3067 /* Then overlay our cached registers on that. */
442ea881 3068 regset->fill_function (regcache, buf);
545587ee
DJ
3069
3070 /* Only now do we write the register set. */
dfb64f85 3071#ifndef __sparc__
95954743 3072 res = ptrace (regset->set_request, pid, 0, buf);
dfb64f85 3073#else
95954743 3074 res = ptrace (regset->set_request, pid, buf, 0);
dfb64f85 3075#endif
545587ee
DJ
3076 }
3077
58caa3dc
DJ
3078 if (res < 0)
3079 {
3080 if (errno == EIO)
3081 {
52fa2412
UW
3082 /* If we get EIO on a regset, do not try it again for
3083 this process. */
3084 disabled_regsets[regset - target_regsets] = 1;
fdeb2a12 3085 free (buf);
52fa2412 3086 continue;
58caa3dc 3087 }
3221518c
UW
3088 else if (errno == ESRCH)
3089 {
1b3f6016
PA
3090 /* At this point, ESRCH should mean the process is
3091 already gone, in which case we simply ignore attempts
3092 to change its registers. See also the related
3093 comment in linux_resume_one_lwp. */
fdeb2a12 3094 free (buf);
3221518c
UW
3095 return 0;
3096 }
58caa3dc
DJ
3097 else
3098 {
ce3a066d 3099 perror ("Warning: ptrace(regsets_store_inferior_registers)");
58caa3dc
DJ
3100 }
3101 }
e9d25b98
DJ
3102 else if (regset->type == GENERAL_REGS)
3103 saw_general_regs = 1;
58caa3dc 3104 regset ++;
09ec9b38 3105 free (buf);
58caa3dc 3106 }
e9d25b98
DJ
3107 if (saw_general_regs)
3108 return 0;
3109 else
3110 return 1;
ce3a066d 3111 return 0;
58caa3dc
DJ
3112}
3113
3114#endif /* HAVE_LINUX_REGSETS */
3115
3116
/* Target hook: fetch REGNO (or all registers if REGNO is -1) into
   REGCACHE.  Tries the regset interface first; falls back to
   PTRACE_PEEKUSER if regsets didn't cover the general registers.  */
void
linux_fetch_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  /* A zero return means the general regs were handled; done.  */
  if (regsets_fetch_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_fetch_inferior_registers (regcache, regno);
#endif
}
3128
/* Target hook: write REGNO (or all registers if REGNO is -1) from
   REGCACHE back to the inferior.  Mirrors linux_fetch_registers:
   regsets first, USER-area pokes as fallback.  */
void
linux_store_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  /* A zero return means the general regs were handled; done.  */
  if (regsets_store_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_store_inferior_registers (regcache, regno);
#endif
}
3140
da6d8c04 3141
da6d8c04
DJ
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success or
   the errno value from a failed ptrace peek.  Prefers reading
   /proc/PID/mem (one syscall); falls back to word-by-word
   PTRACE_PEEKTEXT.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  register int i;
  /* Round starting address down to longword boundary.  */
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  register int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
      / sizeof (PTRACE_XFER_TYPE);
  /* Allocate buffer of that many longwords.  */
  register PTRACE_XFER_TYPE *buffer
    = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
  int fd;
  char filename[64];
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      if (pread64 (fd, myaddr, len, memaddr) != len)
#else
      if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
#endif
	{
	  /* Short read or seek failure: fall back to ptrace.  */
	  close (fd);
	  goto no_proc;
	}

      close (fd);
      return 0;
    }

 no_proc:
  /* Read all the longwords.  */
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* errno is the only reliable failure signal for PEEKTEXT.  */
      errno = 0;
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
      if (errno)
	return errno;
    }

  /* Copy appropriate bytes out of the buffer (skipping the leading
     bytes added by rounding ADDR down).  */
  memcpy (myaddr,
	  (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  len);

  return 0;
}
3210
93ae6fdc
PA
3211/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
3212 memory at MEMADDR. On failure (cannot write to the inferior)
da6d8c04
DJ
3213 returns the value of errno. */
3214
ce3a066d 3215static int
f450004a 3216linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
da6d8c04
DJ
3217{
3218 register int i;
3219 /* Round starting address down to longword boundary. */
3220 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3221 /* Round ending address up; get number of longwords that makes. */
3222 register int count
3223 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
3224 /* Allocate buffer of that many longwords. */
3225 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
95954743 3226 int pid = lwpid_of (get_thread_lwp (current_inferior));
da6d8c04 3227
0d62e5e8
DJ
3228 if (debug_threads)
3229 {
58d6951d
DJ
3230 /* Dump up to four bytes. */
3231 unsigned int val = * (unsigned int *) myaddr;
3232 if (len == 1)
3233 val = val & 0xff;
3234 else if (len == 2)
3235 val = val & 0xffff;
3236 else if (len == 3)
3237 val = val & 0xffffff;
3238 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
3239 val, (long)memaddr);
0d62e5e8
DJ
3240 }
3241
da6d8c04
DJ
3242 /* Fill start and end extra bytes of buffer with existing memory data. */
3243
93ae6fdc 3244 errno = 0;
14ce3065
DE
3245 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3246 about coercing an 8 byte integer to a 4 byte pointer. */
3247 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
3248 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
93ae6fdc
PA
3249 if (errno)
3250 return errno;
da6d8c04
DJ
3251
3252 if (count > 1)
3253 {
93ae6fdc 3254 errno = 0;
da6d8c04 3255 buffer[count - 1]
95954743 3256 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
3257 /* Coerce to a uintptr_t first to avoid potential gcc warning
3258 about coercing an 8 byte integer to a 4 byte pointer. */
3259 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
3260 * sizeof (PTRACE_XFER_TYPE)),
d844cde6 3261 0);
93ae6fdc
PA
3262 if (errno)
3263 return errno;
da6d8c04
DJ
3264 }
3265
93ae6fdc 3266 /* Copy data to be written over corresponding part of buffer. */
da6d8c04
DJ
3267
3268 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
3269
3270 /* Write the entire buffer. */
3271
3272 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3273 {
3274 errno = 0;
14ce3065
DE
3275 ptrace (PTRACE_POKETEXT, pid,
3276 /* Coerce to a uintptr_t first to avoid potential gcc warning
3277 about coercing an 8 byte integer to a 4 byte pointer. */
3278 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
3279 (PTRACE_ARG4_TYPE) buffer[i]);
da6d8c04
DJ
3280 if (errno)
3281 return errno;
3282 }
3283
3284 return 0;
3285}
2f2893d9 3286
6076632b 3287/* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
24a09b5f
DJ
3288static int linux_supports_tracefork_flag;
3289
51c2684e 3290/* Helper functions for linux_test_for_tracefork, called via clone (). */
24a09b5f 3291
51c2684e
DJ
3292static int
3293linux_tracefork_grandchild (void *arg)
3294{
3295 _exit (0);
3296}
3297
7407e2de
AS
3298#define STACK_SIZE 4096
3299
51c2684e
DJ
/* Child body for linux_test_for_tracefork: become traced, stop so the
   parent can set PTRACE_O_TRACEFORK, then fork a grandchild (via
   fork, or clone on no-MMU uClibc) and exit.  */
static int
linux_tracefork_child (void *arg)
{
  /* Request tracing by the parent, then stop so the parent gets a
     chance to set ptrace options before we fork.  */
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);

#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  if (fork () == 0)
    linux_tracefork_grandchild (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  /* No MMU: fork is unavailable, use clone with a caller-provided
     stack (ARG points into the stack block; ia64 needs __clone2).  */
#ifdef __ia64__
  __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
	    CLONE_VM | SIGCHLD, NULL);
#else
  clone (linux_tracefork_grandchild, arg + STACK_SIZE,
	 CLONE_VM | SIGCHLD, NULL);
#endif

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  _exit (0);
}
3325
24a09b5f
DJ
3326/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
3327 sure that we can enable the option, and that it had the desired
3328 effect. */
3329
3330static void
3331linux_test_for_tracefork (void)
3332{
3333 int child_pid, ret, status;
3334 long second_pid;
e4b7f41c 3335#if defined(__UCLIBC__) && defined(HAS_NOMMU)
bca929d3 3336 char *stack = xmalloc (STACK_SIZE * 4);
e4b7f41c 3337#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
24a09b5f
DJ
3338
3339 linux_supports_tracefork_flag = 0;
3340
e4b7f41c
JK
3341#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3342
3343 child_pid = fork ();
3344 if (child_pid == 0)
3345 linux_tracefork_child (NULL);
3346
3347#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3348
51c2684e 3349 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
7407e2de
AS
3350#ifdef __ia64__
3351 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
3352 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
e4b7f41c 3353#else /* !__ia64__ */
7407e2de
AS
3354 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
3355 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
e4b7f41c
JK
3356#endif /* !__ia64__ */
3357
3358#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3359
24a09b5f 3360 if (child_pid == -1)
51c2684e 3361 perror_with_name ("clone");
24a09b5f
DJ
3362
3363 ret = my_waitpid (child_pid, &status, 0);
3364 if (ret == -1)
3365 perror_with_name ("waitpid");
3366 else if (ret != child_pid)
3367 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
3368 if (! WIFSTOPPED (status))
3369 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
3370
14ce3065
DE
3371 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
3372 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
24a09b5f
DJ
3373 if (ret != 0)
3374 {
3375 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3376 if (ret != 0)
3377 {
3378 warning ("linux_test_for_tracefork: failed to kill child");
3379 return;
3380 }
3381
3382 ret = my_waitpid (child_pid, &status, 0);
3383 if (ret != child_pid)
3384 warning ("linux_test_for_tracefork: failed to wait for killed child");
3385 else if (!WIFSIGNALED (status))
3386 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
3387 "killed child", status);
3388
3389 return;
3390 }
3391
3392 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
3393 if (ret != 0)
3394 warning ("linux_test_for_tracefork: failed to resume child");
3395
3396 ret = my_waitpid (child_pid, &status, 0);
3397
3398 if (ret == child_pid && WIFSTOPPED (status)
3399 && status >> 16 == PTRACE_EVENT_FORK)
3400 {
3401 second_pid = 0;
3402 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
3403 if (ret == 0 && second_pid != 0)
3404 {
3405 int second_status;
3406
3407 linux_supports_tracefork_flag = 1;
3408 my_waitpid (second_pid, &second_status, 0);
3409 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
3410 if (ret != 0)
3411 warning ("linux_test_for_tracefork: failed to kill second child");
3412 my_waitpid (second_pid, &status, 0);
3413 }
3414 }
3415 else
3416 warning ("linux_test_for_tracefork: unexpected result from waitpid "
3417 "(%d, status 0x%x)", ret, status);
3418
3419 do
3420 {
3421 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3422 if (ret != 0)
3423 warning ("linux_test_for_tracefork: failed to kill child");
3424 my_waitpid (child_pid, &status, 0);
3425 }
3426 while (WIFSTOPPED (status));
51c2684e 3427
e4b7f41c 3428#if defined(__UCLIBC__) && defined(HAS_NOMMU)
51c2684e 3429 free (stack);
e4b7f41c 3430#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
24a09b5f
DJ
3431}
3432
3433
2f2893d9
DJ
/* Target hook: hook up thread-layer symbol lookup (libthread_db),
   once per process.  No-op unless built with USE_THREAD_DB.  */
static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process.  */
  if (proc->private->thread_db != NULL)
    return;

  /* If the kernel supports tracing forks then it also supports tracing
     clones, and then we don't need to use the magic thread event breakpoint
     to learn about threads.  */
  thread_db_init (!linux_supports_tracefork_flag);
#endif
}
3449
e5379b03 3450static void
ef57601b 3451linux_request_interrupt (void)
e5379b03 3452{
a1928bad 3453 extern unsigned long signal_pid;
e5379b03 3454
95954743
PA
3455 if (!ptid_equal (cont_thread, null_ptid)
3456 && !ptid_equal (cont_thread, minus_one_ptid))
e5379b03 3457 {
54a0b537 3458 struct lwp_info *lwp;
bd99dc85 3459 int lwpid;
e5379b03 3460
54a0b537 3461 lwp = get_thread_lwp (current_inferior);
bd99dc85
PA
3462 lwpid = lwpid_of (lwp);
3463 kill_lwp (lwpid, SIGINT);
e5379b03
DJ
3464 }
3465 else
ef57601b 3466 kill_lwp (signal_pid, SIGINT);
e5379b03
DJ
3467}
3468
aa691b87
RM
3469/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
3470 to debugger memory starting at MYADDR. */
3471
3472static int
f450004a 3473linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
aa691b87
RM
3474{
3475 char filename[PATH_MAX];
3476 int fd, n;
95954743 3477 int pid = lwpid_of (get_thread_lwp (current_inferior));
aa691b87 3478
95954743 3479 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
3480
3481 fd = open (filename, O_RDONLY);
3482 if (fd < 0)
3483 return -1;
3484
3485 if (offset != (CORE_ADDR) 0
3486 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
3487 n = -1;
3488 else
3489 n = read (fd, myaddr, len);
3490
3491 close (fd);
3492
3493 return n;
3494}
3495
d993e290
PA
3496/* These breakpoint and watchpoint related wrapper functions simply
3497 pass on the function call if the target has registered a
3498 corresponding function. */
e013ee27
OF
3499
3500static int
d993e290 3501linux_insert_point (char type, CORE_ADDR addr, int len)
e013ee27 3502{
d993e290
PA
3503 if (the_low_target.insert_point != NULL)
3504 return the_low_target.insert_point (type, addr, len);
e013ee27
OF
3505 else
3506 /* Unsupported (see target.h). */
3507 return 1;
3508}
3509
3510static int
d993e290 3511linux_remove_point (char type, CORE_ADDR addr, int len)
e013ee27 3512{
d993e290
PA
3513 if (the_low_target.remove_point != NULL)
3514 return the_low_target.remove_point (type, addr, len);
e013ee27
OF
3515 else
3516 /* Unsupported (see target.h). */
3517 return 1;
3518}
3519
3520static int
3521linux_stopped_by_watchpoint (void)
3522{
c3adc08c
PA
3523 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3524
3525 return lwp->stopped_by_watchpoint;
e013ee27
OF
3526}
3527
3528static CORE_ADDR
3529linux_stopped_data_address (void)
3530{
c3adc08c
PA
3531 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3532
3533 return lwp->stopped_data_address;
e013ee27
OF
3534}
3535
42c81e2a 3536#if defined(__UCLIBC__) && defined(HAS_NOMMU)
52fb6437
NS
3537#if defined(__mcoldfire__)
3538/* These should really be defined in the kernel's ptrace.h header. */
3539#define PT_TEXT_ADDR 49*4
3540#define PT_DATA_ADDR 50*4
3541#define PT_TEXT_END_ADDR 51*4
3542#endif
3543
3544/* Under uClinux, programs are loaded at non-zero offsets, which we need
3545 to tell gdb about. */
3546
/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Reads the runtime text/data load addresses from
   the traced process's USER area and returns them via TEXT_P/DATA_P.
   Returns 1 on success, 0 if the addresses are unavailable.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
  unsigned long text, text_end, data;
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  /* PEEKUSER returns -1 on error with errno set; clear it first so a
     legitimate -1 value isn't mistaken for failure.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
#endif
 return 0;
}
3579#endif
3580
dc146f7c
VP
/* qsort comparator for ints (used to sort core numbers).  Uses the
   three-way comparison idiom instead of "a - b", whose subtraction
   can overflow signed int (undefined behavior) for values of large
   opposite sign.  */
static int
compare_ints (const void *xa, const void *xb)
{
  int a = *(const int *)xa;
  int b = *(const int *)xb;

  return (a > b) - (a < b);
}
3589
/* Compact adjacent duplicates in the sorted range [B, E) in place,
   and return one past the last element kept (std::unique-style).
   Requires a non-empty range.  */
static int *
unique (int *b, int *e)
{
  int *last = b;
  int *scan;

  for (scan = b + 1; scan != e; scan++)
    if (*scan != *last)
      *++last = *scan;

  return last + 1;
}
3599
3600/* Given PID, iterates over all threads in that process.
3601
3602 Information about each thread, in a format suitable for qXfer:osdata:thread
3603 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
3604 initialized, and the caller is responsible for finishing and appending '\0'
3605 to it.
3606
3607 The list of cores that threads are running on is assigned to *CORES, if it
3608 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
3609 should free *CORES. */
3610
3611static void
3612list_threads (int pid, struct buffer *buffer, char **cores)
3613{
3614 int count = 0;
3615 int allocated = 10;
3616 int *core_numbers = xmalloc (sizeof (int) * allocated);
3617 char pathname[128];
3618 DIR *dir;
3619 struct dirent *dp;
3620 struct stat statbuf;
3621
3622 sprintf (pathname, "/proc/%d/task", pid);
3623 if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
3624 {
3625 dir = opendir (pathname);
3626 if (!dir)
3627 {
3628 free (core_numbers);
3629 return;
3630 }
3631
3632 while ((dp = readdir (dir)) != NULL)
3633 {
3634 unsigned long lwp = strtoul (dp->d_name, NULL, 10);
3635
3636 if (lwp != 0)
3637 {
3638 unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));
3639
3640 if (core != -1)
3641 {
3642 char s[sizeof ("4294967295")];
3643 sprintf (s, "%u", core);
3644
3645 if (count == allocated)
3646 {
3647 allocated *= 2;
3648 core_numbers = realloc (core_numbers,
3649 sizeof (int) * allocated);
3650 }
3651 core_numbers[count++] = core;
3652 if (buffer)
3653 buffer_xml_printf (buffer,
3654 "<item>"
3655 "<column name=\"pid\">%d</column>"
3656 "<column name=\"tid\">%s</column>"
3657 "<column name=\"core\">%s</column>"
3658 "</item>", pid, dp->d_name, s);
3659 }
3660 else
3661 {
3662 if (buffer)
3663 buffer_xml_printf (buffer,
3664 "<item>"
3665 "<column name=\"pid\">%d</column>"
3666 "<column name=\"tid\">%s</column>"
3667 "</item>", pid, dp->d_name);
3668 }
3669 }
3670 }
3671 }
3672
3673 if (cores)
3674 {
3675 *cores = NULL;
3676 if (count > 0)
3677 {
3678 struct buffer buffer2;
3679 int *b;
3680 int *e;
3681 qsort (core_numbers, count, sizeof (int), compare_ints);
3682
3683 /* Remove duplicates. */
3684 b = core_numbers;
3685 e = unique (b, core_numbers + count);
3686
3687 buffer_init (&buffer2);
3688
3689 for (b = core_numbers; b != e; ++b)
3690 {
3691 char number[sizeof ("4294967295")];
3692 sprintf (number, "%u", *b);
3693 buffer_xml_printf (&buffer2, "%s%s",
3694 (b == core_numbers) ? "" : ",", number);
3695 }
3696 buffer_grow_str0 (&buffer2, "");
3697
3698 *cores = buffer_finish (&buffer2);
3699 }
3700 }
3701 free (core_numbers);
3702}
3703
/* Append one qXfer:osdata:processes <item> for PID to BUFFER, with
   owner USERNAME and the command line read from /proc/PID/cmdline
   (NUL separators replaced by spaces).  Also lists the cores the
   process's threads run on.  Silently skips processes whose cmdline
   cannot be read.  */
static void
show_process (int pid, const char *username, struct buffer *buffer)
{
  char pathname[128];
  FILE *f;
  char cmd[MAXPATHLEN + 1];

  sprintf (pathname, "/proc/%d/cmdline", pid);

  if ((f = fopen (pathname, "r")) != NULL)
    {
      size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
      if (len > 0)
	{
	  char *cores = 0;
	  int i;
	  /* cmdline stores argv NUL-separated; join with spaces.  */
	  for (i = 0; i < len; i++)
	    if (cmd[i] == '\0')
	      cmd[i] = ' ';
	  cmd[len] = '\0';

	  buffer_xml_printf (buffer,
			     "<item>"
			     "<column name=\"pid\">%d</column>"
			     "<column name=\"user\">%s</column>"
			     "<column name=\"command\">%s</column>",
			     pid,
			     username,
			     cmd);

	  /* This only collects core numbers, and does not print threads.  */
	  list_threads (pid, NULL, &cores);

	  if (cores)
	    {
	      buffer_xml_printf (buffer,
				 "<column name=\"cores\">%s</column>", cores);
	      free (cores);
	    }

	  buffer_xml_printf (buffer, "</item>");
	}
      fclose (f);
    }
}
3749
07e059b5
VP
/* Target hook backing qXfer:osdata for ANNEX "processes" or
   "threads".  On the first read (OFFSET == 0) a snapshot of /proc is
   rendered to XML in static state; subsequent reads page through it
   and the state is released once OFFSET reaches the end.  Returns
   the number of bytes copied into READBUF, or 0 when done or on an
   unsupported request (writes are not supported).  */
static int
linux_qxfer_osdata (const char *annex,
		    unsigned char *readbuf, unsigned const char *writebuf,
		    CORE_ADDR offset, int len)
{
  /* We make the process list snapshot when the object starts to be
     read.  */
  static const char *buf;
  static long len_avail = -1;
  static struct buffer buffer;
  int processes = 0;
  int threads = 0;

  DIR *dirp;

  if (strcmp (annex, "processes") == 0)
    processes = 1;
  else if (strcmp (annex, "threads") == 0)
    threads = 1;
  else
    return 0;

  if (!readbuf || writebuf)
    return 0;

  if (offset == 0)
    {
      /* A new request: drop any leftover snapshot and rebuild.  */
      if (len_avail != -1 && len_avail != 0)
	buffer_free (&buffer);
      len_avail = 0;
      buf = NULL;
      buffer_init (&buffer);
      if (processes)
	buffer_grow_str (&buffer, "<osdata type=\"processes\">");
      else if (threads)
	buffer_grow_str (&buffer, "<osdata type=\"threads\">");

      dirp = opendir ("/proc");
      if (dirp)
	{
	  struct dirent *dp;
	  while ((dp = readdir (dirp)) != NULL)
	    {
	      struct stat statbuf;
	      char procentry[sizeof ("/proc/4294967295")];

	      /* Only all-digit names of plausible pid length.  */
	      if (!isdigit (dp->d_name[0])
		  || strlen (dp->d_name) > sizeof ("4294967295") - 1)
		continue;

	      sprintf (procentry, "/proc/%s", dp->d_name);
	      if (stat (procentry, &statbuf) == 0
		  && S_ISDIR (statbuf.st_mode))
		{
		  int pid = (int) strtoul (dp->d_name, NULL, 10);

		  if (processes)
		    {
		      struct passwd *entry = getpwuid (statbuf.st_uid);
		      show_process (pid, entry ? entry->pw_name : "?", &buffer);
		    }
		  else if (threads)
		    {
		      list_threads (pid, &buffer, NULL);
		    }
		}
	    }

	  closedir (dirp);
	}
      buffer_grow_str0 (&buffer, "</osdata>\n");
      buf = buffer_finish (&buffer);
      len_avail = strlen (buf);
    }

  if (offset >= len_avail)
    {
      /* Done.  Get rid of the data.  */
      buffer_free (&buffer);
      buf = NULL;
      len_avail = 0;
      return 0;
    }

  if (len > len_avail - offset)
    len = len_avail - offset;
  memcpy (readbuf, buf + offset, len);

  return len;
}
3840
d0722149
DE
3841/* Convert a native/host siginfo object, into/from the siginfo in the
3842 layout of the inferiors' architecture. */
3843
3844static void
3845siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
3846{
3847 int done = 0;
3848
3849 if (the_low_target.siginfo_fixup != NULL)
3850 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
3851
3852 /* If there was no callback, or the callback didn't do anything,
3853 then just do a straight memcpy. */
3854 if (!done)
3855 {
3856 if (direction == 1)
3857 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3858 else
3859 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3860 }
3861}
3862
4aa995e1
PA
/* Target hook backing qXfer:siginfo: read or write (part of) the
   pending signal information of the current LWP via
   PTRACE_GETSIGINFO/SETSIGINFO.  Exactly one of READBUF/WRITEBUF is
   non-NULL.  Returns the number of bytes transferred, or -1 on
   error.  Writes are read-modify-write: the full siginfo is fetched,
   patched at OFFSET, and stored back.  */
static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  struct siginfo siginfo;
  char inf_siginfo[sizeof (struct siginfo)];

  if (current_inferior == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));

  if (debug_threads)
    fprintf (stderr, "%s siginfo for lwp %d.\n",
	     readbuf != NULL ? "Reading" : "Writing",
	     pid);

  if (offset > sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
3911
bd99dc85
PA
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.  Must only use async-signal-safe calls
   and must preserve errno for the interrupted code.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_threads)
    /* fprintf is not async-signal-safe, so call write directly.
       (The write return value is deliberately ignored; there is
       nothing useful to do on failure inside a signal handler.)  */
    write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}
3930
/* Target hook: the Linux backend supports non-stop mode
   unconditionally.  */
static int
linux_supports_non_stop (void)
{
  return 1;
}
3936
3937static int
3938linux_async (int enable)
3939{
3940 int previous = (linux_event_pipe[0] != -1);
3941
3942 if (previous != enable)
3943 {
3944 sigset_t mask;
3945 sigemptyset (&mask);
3946 sigaddset (&mask, SIGCHLD);
3947
3948 sigprocmask (SIG_BLOCK, &mask, NULL);
3949
3950 if (enable)
3951 {
3952 if (pipe (linux_event_pipe) == -1)
3953 fatal ("creating event pipe failed.");
3954
3955 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
3956 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
3957
3958 /* Register the event loop handler. */
3959 add_file_handler (linux_event_pipe[0],
3960 handle_target_event, NULL);
3961
3962 /* Always trigger a linux_wait. */
3963 async_file_mark ();
3964 }
3965 else
3966 {
3967 delete_file_handler (linux_event_pipe[0]);
3968
3969 close (linux_event_pipe[0]);
3970 close (linux_event_pipe[1]);
3971 linux_event_pipe[0] = -1;
3972 linux_event_pipe[1] = -1;
3973 }
3974
3975 sigprocmask (SIG_UNBLOCK, &mask, NULL);
3976 }
3977
3978 return previous;
3979}
3980
/* Target hook: switch the backend into or out of non-stop mode.  */
static int
linux_start_non_stop (int nonstop)
{
  /* All that's needed is to (un)register with the event loop; the
     previous-state value linux_async returns is irrelevant here.  */
  linux_async (nonstop);
  return 0;	/* Always succeeds.  */
}
3988
cf8fd78b
PA
/* Target hook: the Linux backend can debug multiple processes
   simultaneously.  */
static int
linux_supports_multi_process (void)
{
  return 1;
}
3994
efcbbd14
UW
3995
3996/* Enumerate spufs IDs for process PID. */
3997static int
3998spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
3999{
4000 int pos = 0;
4001 int written = 0;
4002 char path[128];
4003 DIR *dir;
4004 struct dirent *entry;
4005
4006 sprintf (path, "/proc/%ld/fd", pid);
4007 dir = opendir (path);
4008 if (!dir)
4009 return -1;
4010
4011 rewinddir (dir);
4012 while ((entry = readdir (dir)) != NULL)
4013 {
4014 struct stat st;
4015 struct statfs stfs;
4016 int fd;
4017
4018 fd = atoi (entry->d_name);
4019 if (!fd)
4020 continue;
4021
4022 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4023 if (stat (path, &st) != 0)
4024 continue;
4025 if (!S_ISDIR (st.st_mode))
4026 continue;
4027
4028 if (statfs (path, &stfs) != 0)
4029 continue;
4030 if (stfs.f_type != SPUFS_MAGIC)
4031 continue;
4032
4033 if (pos >= offset && pos + 4 <= offset + len)
4034 {
4035 *(unsigned int *)(buf + pos - offset) = fd;
4036 written += 4;
4037 }
4038 pos += 4;
4039 }
4040
4041 closedir (dir);
4042 return written;
4043}
4044
4045/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4046 object type, using the /proc file system. */
4047static int
4048linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4049 unsigned const char *writebuf,
4050 CORE_ADDR offset, int len)
4051{
4052 long pid = lwpid_of (get_thread_lwp (current_inferior));
4053 char buf[128];
4054 int fd = 0;
4055 int ret = 0;
4056
4057 if (!writebuf && !readbuf)
4058 return -1;
4059
4060 if (!*annex)
4061 {
4062 if (!readbuf)
4063 return -1;
4064 else
4065 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4066 }
4067
4068 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4069 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4070 if (fd <= 0)
4071 return -1;
4072
4073 if (offset != 0
4074 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4075 {
4076 close (fd);
4077 return 0;
4078 }
4079
4080 if (writebuf)
4081 ret = write (fd, writebuf, (size_t) len);
4082 else
4083 ret = read (fd, readbuf, (size_t) len);
4084
4085 close (fd);
4086 return ret;
4087}
4088
dc146f7c
VP
4089static int
4090linux_core_of_thread (ptid_t ptid)
4091{
4092 char filename[sizeof ("/proc//task//stat")
4093 + 2 * 20 /* decimal digits for 2 numbers, max 2^64 bit each */
4094 + 1];
4095 FILE *f;
4096 char *content = NULL;
4097 char *p;
4098 char *ts = 0;
4099 int content_read = 0;
4100 int i;
4101 int core;
4102
4103 sprintf (filename, "/proc/%d/task/%ld/stat",
4104 ptid_get_pid (ptid), ptid_get_lwp (ptid));
4105 f = fopen (filename, "r");
4106 if (!f)
4107 return -1;
4108
4109 for (;;)
4110 {
4111 int n;
4112 content = realloc (content, content_read + 1024);
4113 n = fread (content + content_read, 1, 1024, f);
4114 content_read += n;
4115 if (n < 1024)
4116 {
4117 content[content_read] = '\0';
4118 break;
4119 }
4120 }
4121
4122 p = strchr (content, '(');
4123 p = strchr (p, ')') + 2; /* skip ")" and a whitespace. */
4124
4125 p = strtok_r (p, " ", &ts);
4126 for (i = 0; i != 36; ++i)
4127 p = strtok_r (NULL, " ", &ts);
4128
4129 if (sscanf (p, "%d", &core) == 0)
4130 core = -1;
4131
4132 free (content);
4133 fclose (f);
4134
4135 return core;
4136}
4137
ce3a066d
DJ
/* The Linux target vector.  Entries are positional and must match the
   member order of struct target_ops (declared elsewhere in gdbserver);
   NULL marks an operation this configuration does not provide.  */
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
  /* Text/data segment offsets are only meaningful on uClinux/no-MMU
     targets.  */
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  linux_read_offsets,
#else
  NULL,
#endif
  /* TLS address lookup requires libthread_db support.  */
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
  /* Monitor commands are routed to libthread_db when available.  */
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_core_of_thread
};
4183
0d62e5e8
DJ
/* Set up process-wide signal dispositions needed by the Linux
   target.  */
static void
linux_init_signals (void)	/* (void): "()" left the arg list
				   unprototyped.  */
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  /* Ignore the signal LinuxThreads uses for thread cancellation so it
     does not stop or kill gdbserver itself.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
4193
da6d8c04
DJ
/* One-time initialization of the Linux low-level target: install the
   target vector, breakpoint data, signal dispositions, and the SIGCHLD
   handler that drives async event reporting.  Called once at gdbserver
   startup.  */
void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  /* Tell the breakpoint machinery which software-breakpoint instruction
     this architecture uses.  */
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  /* Probe which ptrace fork/clone event options this kernel supports.  */
  linux_test_for_tracefork ();
#ifdef HAVE_LINUX_REGSETS
  /* Count the regsets (the table is terminated by a negative size) and
     allocate the per-regset "disabled" flags.  */
  for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
    ;
  disabled_regsets = xmalloc (num_regsets);
#endif

  /* SA_RESTART so interrupted syscalls are restarted; the handler both
     wakes the event loop (async mode) and ends sigsuspend in
     my_waitpid.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);
}